/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
8#include <linux/kernel.h>
9#include <linux/bitops.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/pagemap.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/dmapool.h>
19#include <linux/mempool.h>
20#include <linux/spinlock.h>
21#include <linux/kthread.h>
22#include <linux/interrupt.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/in.h>
26#include <linux/ip.h>
27#include <linux/ipv6.h>
28#include <net/ipv6.h>
29#include <linux/tcp.h>
30#include <linux/udp.h>
31#include <linux/if_arp.h>
32#include <linux/if_ether.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/ethtool.h>
36#include <linux/if_vlan.h>
37#include <linux/skbuff.h>
38#include <linux/delay.h>
39#include <linux/mm.h>
40#include <linux/vmalloc.h>
41#include <linux/prefetch.h>
42#include <net/ip6_checksum.h>
43
44#include "qlge.h"
45#include "qlge_devlink.h"
46
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
54
static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
	NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |
	NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR |
	NETIF_MSG_HW | NETIF_MSG_WOL;
62
63static int debug = -1;
64module_param(debug, int, 0664);
65MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
66
67#define MSIX_IRQ 0
68#define MSI_IRQ 1
69#define LEG_IRQ 2
70static int qlge_irq_type = MSIX_IRQ;
71module_param(qlge_irq_type, int, 0664);
72MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
73
74static int qlge_mpi_coredump;
75module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		 "Option to enable MPI firmware dump. Default is OFF - do not allocate memory.");
78
79static int qlge_force_coredump;
80module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		 "Option to allow force of firmware core dump. Default is OFF - do not allow.");
83
84static const struct pci_device_id qlge_pci_tbl[] = {
85 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
86 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
87
88 {0,}
89};
90
91MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
92
93static int qlge_wol(struct qlge_adapter *);
94static void qlge_set_multicast_list(struct net_device *);
95static int qlge_adapter_down(struct qlge_adapter *);
96static int qlge_adapter_up(struct qlge_adapter *);
97
/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
102static int qlge_sem_trylock(struct qlge_adapter *qdev, u32 sem_mask)
103{
104 u32 sem_bits = 0;
105
106 switch (sem_mask) {
107 case SEM_XGMAC0_MASK:
108 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
109 break;
110 case SEM_XGMAC1_MASK:
111 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
112 break;
113 case SEM_ICB_MASK:
114 sem_bits = SEM_SET << SEM_ICB_SHIFT;
115 break;
116 case SEM_MAC_ADDR_MASK:
117 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
118 break;
119 case SEM_FLASH_MASK:
120 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
121 break;
122 case SEM_PROBE_MASK:
123 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
124 break;
125 case SEM_RT_IDX_MASK:
126 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
127 break;
128 case SEM_PROC_REG_MASK:
129 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
130 break;
131 default:
		netif_alert(qdev, probe, qdev->ndev, "bad semaphore mask!\n");
133 return -EINVAL;
134 }
135
136 qlge_write32(qdev, SEM, sem_bits | sem_mask);
137 return !(qlge_read32(qdev, SEM) & sem_bits);
138}
139
140int qlge_sem_spinlock(struct qlge_adapter *qdev, u32 sem_mask)
141{
142 unsigned int wait_count = 30;
143
144 do {
145 if (!qlge_sem_trylock(qdev, sem_mask))
146 return 0;
147 udelay(100);
148 } while (--wait_count);
149 return -ETIMEDOUT;
150}
151
152void qlge_sem_unlock(struct qlge_adapter *qdev, u32 sem_mask)
153{
154 qlge_write32(qdev, SEM, sem_mask);
155 qlge_read32(qdev, SEM);
156}
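
/* A minimal usage sketch for the two helpers above, modeled on
 * qlge_set_mac_addr() later in this file: take the hardware semaphore,
 * touch the shared registers, then always release it, even on error.
 *
 *	status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 *	if (status)
 *		return status;
 *	status = qlge_set_mac_addr_reg(qdev, addr, MAC_ADDR_TYPE_CAM_MAC,
 *				       qdev->func * MAX_CQ);
 *	qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 */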
157
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address and netdev->vlan_rx_add_vid.
 */
163int qlge_wait_reg_rdy(struct qlge_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
164{
165 u32 temp;
166 int count;
167
168 for (count = 0; count < UDELAY_COUNT; count++) {
169 temp = qlge_read32(qdev, reg);

		/* check for errors */
172 if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
176 return -EIO;
177 } else if (temp & bit) {
178 return 0;
179 }
180 udelay(UDELAY_DELAY);
181 }
182 netif_alert(qdev, probe, qdev->ndev,
183 "Timed out waiting for reg %x to come ready.\n", reg);
184 return -ETIMEDOUT;
185}
186
/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
190static int qlge_wait_cfg(struct qlge_adapter *qdev, u32 bit)
191{
192 int count;
193 u32 temp;
194
195 for (count = 0; count < UDELAY_COUNT; count++) {
196 temp = qlge_read32(qdev, CFG);
197 if (temp & CFG_LE)
198 return -EIO;
199 if (!(temp & bit))
200 return 0;
201 udelay(UDELAY_DELAY);
202 }
203 return -ETIMEDOUT;
204}
205
/* Used to issue a load of a control block (ICB) to the chip: the block
 * is DMA-mapped and handed to the hardware through the CFG register.
 */
209int qlge_write_cfg(struct qlge_adapter *qdev, void *ptr, int size, u32 bit,
210 u16 q_id)
211{
212 u64 map;
213 int status = 0;
214 int direction;
215 u32 mask;
216 u32 value;
217
218 if (bit & (CFG_LRQ | CFG_LR | CFG_LCQ))
219 direction = DMA_TO_DEVICE;
220 else
221 direction = DMA_FROM_DEVICE;
222
223 map = dma_map_single(&qdev->pdev->dev, ptr, size, direction);
224 if (dma_mapping_error(&qdev->pdev->dev, map)) {
225 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
226 return -ENOMEM;
227 }
228
229 status = qlge_sem_spinlock(qdev, SEM_ICB_MASK);
230 if (status)
231 goto lock_failed;
232
233 status = qlge_wait_cfg(qdev, bit);
234 if (status) {
235 netif_err(qdev, ifup, qdev->ndev,
236 "Timed out waiting for CFG to come ready.\n");
237 goto exit;
238 }
239
240 qlge_write32(qdev, ICB_L, (u32)map);
241 qlge_write32(qdev, ICB_H, (u32)(map >> 32));
242
243 mask = CFG_Q_MASK | (bit << 16);
244 value = bit | (q_id << CFG_Q_SHIFT);
245 qlge_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
250 status = qlge_wait_cfg(qdev, bit);
251exit:
252 qlge_sem_unlock(qdev, SEM_ICB_MASK);
253lock_failed:
254 dma_unmap_single(&qdev->pdev->dev, map, size, direction);
255 return status;
256}
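
/* qlge_write_cfg() is the generic "load a control block" path: the
 * caller hands in a CPU buffer (an initialization control block), this
 * routine DMA-maps it, points ICB_L/ICB_H at the mapping, kicks the CFG
 * register with the requested load bit and queue id, then polls for the
 * bit to clear before unmapping.  A call shaped roughly like the ones
 * made later in the driver (the argument names here are illustrative,
 * not the real callers) would be:
 *
 *	err = qlge_write_cfg(qdev, ctrl_block, ctrl_block_size,
 *			     CFG_LCQ, rx_ring->cq_id);
 */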
257
258
259int qlge_get_mac_addr_reg(struct qlge_adapter *qdev, u32 type, u16 index,
260 u32 *value)
261{
262 u32 offset = 0;
263 int status;
264
265 switch (type) {
266 case MAC_ADDR_TYPE_MULTI_MAC:
267 case MAC_ADDR_TYPE_CAM_MAC: {
268 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
269 if (status)
270 break;
271 qlge_write32(qdev, MAC_ADDR_IDX,
272 (offset++) |
273 (index << MAC_ADDR_IDX_SHIFT) |
274 MAC_ADDR_ADR | MAC_ADDR_RS |
275 type);
276 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
277 if (status)
278 break;
279 *value++ = qlge_read32(qdev, MAC_ADDR_DATA);
280 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
281 if (status)
282 break;
283 qlge_write32(qdev, MAC_ADDR_IDX,
284 (offset++) |
285 (index << MAC_ADDR_IDX_SHIFT) |
286 MAC_ADDR_ADR | MAC_ADDR_RS |
287 type);
288 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
289 if (status)
290 break;
291 *value++ = qlge_read32(qdev, MAC_ADDR_DATA);
292 if (type == MAC_ADDR_TYPE_CAM_MAC) {
293 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX,
294 MAC_ADDR_MW, 0);
295 if (status)
296 break;
297 qlge_write32(qdev, MAC_ADDR_IDX,
298 (offset++) |
299 (index
300 << MAC_ADDR_IDX_SHIFT) |
301 MAC_ADDR_ADR |
302 MAC_ADDR_RS | type);
303 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX,
304 MAC_ADDR_MR, 0);
305 if (status)
306 break;
307 *value++ = qlge_read32(qdev, MAC_ADDR_DATA);
308 }
309 break;
310 }
311 case MAC_ADDR_TYPE_VLAN:
312 case MAC_ADDR_TYPE_MULTI_FLTR:
313 default:
314 netif_crit(qdev, ifup, qdev->ndev,
315 "Address type %d not yet supported.\n", type);
316 status = -EPERM;
317 }
318 return status;
319}
320
321
322
323
324static int qlge_set_mac_addr_reg(struct qlge_adapter *qdev, u8 *addr, u32 type,
325 u16 index)
326{
327 u32 offset = 0;
328 int status = 0;
329
330 switch (type) {
331 case MAC_ADDR_TYPE_MULTI_MAC: {
332 u32 upper = (addr[0] << 8) | addr[1];
333 u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
334 (addr[5]);
335
336 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
337 if (status)
338 break;
339 qlge_write32(qdev, MAC_ADDR_IDX,
340 (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
341 MAC_ADDR_E);
342 qlge_write32(qdev, MAC_ADDR_DATA, lower);
343 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
344 if (status)
345 break;
346 qlge_write32(qdev, MAC_ADDR_IDX,
347 (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
348 MAC_ADDR_E);
349
350 qlge_write32(qdev, MAC_ADDR_DATA, upper);
351 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
352 break;
353 }
354 case MAC_ADDR_TYPE_CAM_MAC: {
355 u32 cam_output;
356 u32 upper = (addr[0] << 8) | addr[1];
357 u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
358 (addr[5]);
359 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
360 if (status)
361 break;
362 qlge_write32(qdev, MAC_ADDR_IDX,
363 (offset++) |
364 (index << MAC_ADDR_IDX_SHIFT) |
365 type);
366 qlge_write32(qdev, MAC_ADDR_DATA, lower);
367 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
368 if (status)
369 break;
370 qlge_write32(qdev, MAC_ADDR_IDX,
371 (offset++) |
372 (index << MAC_ADDR_IDX_SHIFT) |
373 type);
374 qlge_write32(qdev, MAC_ADDR_DATA, upper);
375 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
376 if (status)
377 break;
378 qlge_write32(qdev, MAC_ADDR_IDX,
379 (offset) |
380 (index << MAC_ADDR_IDX_SHIFT) |
381 type);
382
383
384
385
386 cam_output = (CAM_OUT_ROUTE_NIC |
387 (qdev->func << CAM_OUT_FUNC_SHIFT) |
388 (0 << CAM_OUT_CQ_ID_SHIFT));
389 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
390 cam_output |= CAM_OUT_RV;
391
392 qlge_write32(qdev, MAC_ADDR_DATA, cam_output);
393 break;
394 }
395 case MAC_ADDR_TYPE_VLAN: {
396 u32 enable_bit = *((u32 *)&addr[0]);
397
398
399
400
401
402 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
403 if (status)
404 break;
405 qlge_write32(qdev, MAC_ADDR_IDX,
406 offset |
407 (index << MAC_ADDR_IDX_SHIFT) |
408 type |
409 enable_bit);
410 break;
411 }
412 case MAC_ADDR_TYPE_MULTI_FLTR:
413 default:
414 netif_crit(qdev, ifup, qdev->ndev,
415 "Address type %d not yet supported.\n", type);
416 status = -EPERM;
417 }
418 return status;
419}
420
421
422
423
424
425static int qlge_set_mac_addr(struct qlge_adapter *qdev, int set)
426{
427 int status;
428 char zero_mac_addr[ETH_ALEN];
429 char *addr;
430
431 if (set) {
432 addr = &qdev->current_mac_addr[0];
433 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
434 "Set Mac addr %pM\n", addr);
435 } else {
436 eth_zero_addr(zero_mac_addr);
437 addr = &zero_mac_addr[0];
438 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
439 "Clearing MAC address\n");
440 }
441 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
442 if (status)
443 return status;
444 status = qlge_set_mac_addr_reg(qdev, (u8 *)addr,
445 MAC_ADDR_TYPE_CAM_MAC,
446 qdev->func * MAX_CQ);
447 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
448 if (status)
449 netif_err(qdev, ifup, qdev->ndev,
450 "Failed to init mac address.\n");
451 return status;
452}
453
454void qlge_link_on(struct qlge_adapter *qdev)
455{
456 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
457 netif_carrier_on(qdev->ndev);
458 qlge_set_mac_addr(qdev, 1);
459}
460
461void qlge_link_off(struct qlge_adapter *qdev)
462{
463 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
464 netif_carrier_off(qdev->ndev);
465 qlge_set_mac_addr(qdev, 0);
466}
467
468
469
470
471int qlge_get_routing_reg(struct qlge_adapter *qdev, u32 index, u32 *value)
472{
473 int status = 0;
474
475 status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
476 if (status)
477 goto exit;
478
479 qlge_write32(qdev, RT_IDX,
480 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
481 status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
482 if (status)
483 goto exit;
484 *value = qlge_read32(qdev, RT_DATA);
485exit:
486 return status;
487}
488
489
490
491
492
493
494static int qlge_set_routing_reg(struct qlge_adapter *qdev, u32 index, u32 mask,
495 int enable)
496{
497 int status = -EINVAL;
498 u32 value = 0;
499
500 switch (mask) {
501 case RT_IDX_CAM_HIT:
502 {
503 value = RT_IDX_DST_CAM_Q |
504 RT_IDX_TYPE_NICQ |
505 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);
506 break;
507 }
508 case RT_IDX_VALID:
509 {
510 value = RT_IDX_DST_DFLT_Q |
511 RT_IDX_TYPE_NICQ |
512 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);
513 break;
514 }
515 case RT_IDX_ERR:
516 {
517 value = RT_IDX_DST_DFLT_Q |
518 RT_IDX_TYPE_NICQ |
519 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);
520 break;
521 }
522 case RT_IDX_IP_CSUM_ERR:
523 {
524 value = RT_IDX_DST_DFLT_Q |
525 RT_IDX_TYPE_NICQ |
526 (RT_IDX_IP_CSUM_ERR_SLOT <<
527 RT_IDX_IDX_SHIFT);
528 break;
529 }
530 case RT_IDX_TU_CSUM_ERR:
531 {
532 value = RT_IDX_DST_DFLT_Q |
533 RT_IDX_TYPE_NICQ |
534 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
535 RT_IDX_IDX_SHIFT);
536 break;
537 }
538 case RT_IDX_BCAST:
539 {
540 value = RT_IDX_DST_DFLT_Q |
541 RT_IDX_TYPE_NICQ |
542 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);
543 break;
544 }
545 case RT_IDX_MCAST:
546 {
547 value = RT_IDX_DST_DFLT_Q |
548 RT_IDX_TYPE_NICQ |
549 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);
550 break;
551 }
552 case RT_IDX_MCAST_MATCH:
553 {
554 value = RT_IDX_DST_DFLT_Q |
555 RT_IDX_TYPE_NICQ |
556 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);
557 break;
558 }
559 case RT_IDX_RSS_MATCH:
560 {
561 value = RT_IDX_DST_RSS |
562 RT_IDX_TYPE_NICQ |
563 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);
564 break;
565 }
566 case 0:
567 {
568 value = RT_IDX_DST_DFLT_Q |
569 RT_IDX_TYPE_NICQ |
570 (index << RT_IDX_IDX_SHIFT);
571 break;
572 }
573 default:
574 netif_err(qdev, ifup, qdev->ndev,
575 "Mask type %d not yet supported.\n", mask);
576 status = -EPERM;
577 goto exit;
578 }
579
580 if (value) {
581 status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
582 if (status)
583 goto exit;
584 value |= (enable ? RT_IDX_E : 0);
585 qlge_write32(qdev, RT_IDX, value);
586 qlge_write32(qdev, RT_DATA, enable ? mask : 0);
587 }
588exit:
589 return status;
590}
591
592static void qlge_enable_interrupts(struct qlge_adapter *qdev)
593{
594 qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
595}
596
597static void qlge_disable_interrupts(struct qlge_adapter *qdev)
598{
599 qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
600}
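
/* A note on the interrupt register convention used above: writing
 * (INTR_EN_EI << 16) | INTR_EN_EI sets the enable bit, while writing
 * only (INTR_EN_EI << 16) clears it.  The upper 16 bits appear to act
 * as a write mask selecting which low bits the write may change, so one
 * 32-bit write can set or clear a bit without a read-modify-write.
 * This is inferred from the enable/disable pair above rather than from
 * hardware documentation.
 */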
601
602static void qlge_enable_completion_interrupt(struct qlge_adapter *qdev, u32 intr)
603{
604 struct intr_context *ctx = &qdev->intr_context[intr];
605
606 qlge_write32(qdev, INTR_EN, ctx->intr_en_mask);
607}
608
609static void qlge_disable_completion_interrupt(struct qlge_adapter *qdev, u32 intr)
610{
611 struct intr_context *ctx = &qdev->intr_context[intr];
612
613 qlge_write32(qdev, INTR_EN, ctx->intr_dis_mask);
614}
615
616static void qlge_enable_all_completion_interrupts(struct qlge_adapter *qdev)
617{
618 int i;
619
620 for (i = 0; i < qdev->intr_count; i++)
621 qlge_enable_completion_interrupt(qdev, i);
622}
623
624static int qlge_validate_flash(struct qlge_adapter *qdev, u32 size, const char *str)
625{
626 int status, i;
627 u16 csum = 0;
628 __le16 *flash = (__le16 *)&qdev->flash;
629
630 status = strncmp((char *)&qdev->flash, str, 4);
631 if (status) {
632 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
633 return status;
634 }
635
636 for (i = 0; i < size; i++)
637 csum += le16_to_cpu(*flash++);
638
639 if (csum)
640 netif_err(qdev, ifup, qdev->ndev,
641 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
642
643 return csum;
644}
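
/* The flash checksum above is a plain 16-bit sum over the image that
 * must wrap to zero.  As a worked example: if an image consisted of
 * only the words 0x1234, 0xfff0 and a checksum word, that checksum
 * word would have to be 0x10000 - ((0x1234 + 0xfff0) & 0xffff) =
 * 0xeddc, so the total 0x1234 + 0xfff0 + 0xeddc is 0 modulo 0x10000.
 */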
645
646static int qlge_read_flash_word(struct qlge_adapter *qdev, int offset, __le32 *data)
647{
648 int status = 0;
649
650 status = qlge_wait_reg_rdy(qdev,
651 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
652 if (status)
653 goto exit;
654
655 qlge_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
656
657 status = qlge_wait_reg_rdy(qdev,
658 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
659 if (status)
660 goto exit;
661
662
663
664
665 *data = cpu_to_le32(qlge_read32(qdev, FLASH_DATA));
666exit:
667 return status;
668}
669
670static int qlge_get_8000_flash_params(struct qlge_adapter *qdev)
671{
672 u32 i, size;
673 int status;
674 __le32 *p = (__le32 *)&qdev->flash;
675 u32 offset;
676 u8 mac_addr[6];
677
678
679
680
681 if (!qdev->port)
682 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
683 else
684 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
685
686 if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK))
687 return -ETIMEDOUT;
688
689 size = sizeof(struct flash_params_8000) / sizeof(u32);
690 for (i = 0; i < size; i++, p++) {
691 status = qlge_read_flash_word(qdev, i + offset, p);
692 if (status) {
693 netif_err(qdev, ifup, qdev->ndev,
694 "Error reading flash.\n");
695 goto exit;
696 }
697 }
698
699 status = qlge_validate_flash(qdev,
700 sizeof(struct flash_params_8000) /
701 sizeof(u16),
702 "8000");
703 if (status) {
704 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
705 status = -EINVAL;
706 goto exit;
707 }
708
709
710
711
712 if (qdev->flash.flash_params_8000.data_type1 == 2)
713 memcpy(mac_addr,
714 qdev->flash.flash_params_8000.mac_addr1,
715 qdev->ndev->addr_len);
716 else
717 memcpy(mac_addr,
718 qdev->flash.flash_params_8000.mac_addr,
719 qdev->ndev->addr_len);
720
721 if (!is_valid_ether_addr(mac_addr)) {
722 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
723 status = -EINVAL;
724 goto exit;
725 }
726
727 memcpy(qdev->ndev->dev_addr,
728 mac_addr,
729 qdev->ndev->addr_len);
730
731exit:
732 qlge_sem_unlock(qdev, SEM_FLASH_MASK);
733 return status;
734}
735
736static int qlge_get_8012_flash_params(struct qlge_adapter *qdev)
737{
738 int i;
739 int status;
740 __le32 *p = (__le32 *)&qdev->flash;
741 u32 offset = 0;
742 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
743
744
745
746
747 if (qdev->port)
748 offset = size;
749
750 if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK))
751 return -ETIMEDOUT;
752
753 for (i = 0; i < size; i++, p++) {
754 status = qlge_read_flash_word(qdev, i + offset, p);
755 if (status) {
756 netif_err(qdev, ifup, qdev->ndev,
757 "Error reading flash.\n");
758 goto exit;
759 }
760 }
761
762 status = qlge_validate_flash(qdev,
763 sizeof(struct flash_params_8012) /
764 sizeof(u16),
765 "8012");
766 if (status) {
767 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
768 status = -EINVAL;
769 goto exit;
770 }
771
772 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
773 status = -EINVAL;
774 goto exit;
775 }
776
777 memcpy(qdev->ndev->dev_addr,
778 qdev->flash.flash_params_8012.mac_addr,
779 qdev->ndev->addr_len);
780
781exit:
782 qlge_sem_unlock(qdev, SEM_FLASH_MASK);
783 return status;
784}
785
786
787
788
789
790static int qlge_write_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 data)
791{
792 int status;
793
794 status = qlge_wait_reg_rdy(qdev,
795 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
796 if (status)
797 return status;
798
799 qlge_write32(qdev, XGMAC_DATA, data);
800
801 qlge_write32(qdev, XGMAC_ADDR, reg);
802 return status;
803}
804
805
806
807
808
809int qlge_read_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 *data)
810{
811 int status = 0;
812
813 status = qlge_wait_reg_rdy(qdev,
814 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
815 if (status)
816 goto exit;
817
818 qlge_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
819
820 status = qlge_wait_reg_rdy(qdev,
821 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
822 if (status)
823 goto exit;
824
825 *data = qlge_read32(qdev, XGMAC_DATA);
826exit:
827 return status;
828}
829
830
831int qlge_read_xgmac_reg64(struct qlge_adapter *qdev, u32 reg, u64 *data)
832{
833 int status = 0;
834 u32 hi = 0;
835 u32 lo = 0;
836
837 status = qlge_read_xgmac_reg(qdev, reg, &lo);
838 if (status)
839 goto exit;
840
841 status = qlge_read_xgmac_reg(qdev, reg + 4, &hi);
842 if (status)
843 goto exit;
844
845 *data = (u64)lo | ((u64)hi << 32);
846
847exit:
848 return status;
849}
850
851static int qlge_8000_port_initialize(struct qlge_adapter *qdev)
852{
853 int status;
854
855
856
857
858 status = qlge_mb_about_fw(qdev);
859 if (status)
860 goto exit;
861 status = qlge_mb_get_fw_state(qdev);
862 if (status)
863 goto exit;
864
865 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
866exit:
867 return status;
868}
869
870
871
872
873
874
875
876static int qlge_8012_port_initialize(struct qlge_adapter *qdev)
877{
878 int status = 0;
879 u32 data;
880
881 if (qlge_sem_trylock(qdev, qdev->xg_sem_mask)) {
882
883
884
885 netif_info(qdev, link, qdev->ndev,
886 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
887 status = qlge_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
888 if (status) {
889 netif_crit(qdev, link, qdev->ndev,
890 "Port initialize timed out.\n");
891 }
892 return status;
893 }
894
	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
896
897 status = qlge_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
898 if (status)
899 goto end;
900 data |= GLOBAL_CFG_RESET;
901 status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data);
902 if (status)
903 goto end;
904
905
906 data &= ~GLOBAL_CFG_RESET;
907 data |= GLOBAL_CFG_JUMBO;
908 data |= GLOBAL_CFG_TX_STAT_EN;
909 data |= GLOBAL_CFG_RX_STAT_EN;
910 status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data);
911 if (status)
912 goto end;
913
914
915 status = qlge_read_xgmac_reg(qdev, TX_CFG, &data);
916 if (status)
917 goto end;
918 data &= ~TX_CFG_RESET;
919 data |= TX_CFG_EN;
920 status = qlge_write_xgmac_reg(qdev, TX_CFG, data);
921 if (status)
922 goto end;
923
924
925 status = qlge_read_xgmac_reg(qdev, RX_CFG, &data);
926 if (status)
927 goto end;
928 data &= ~RX_CFG_RESET;
929 data |= RX_CFG_EN;
930 status = qlge_write_xgmac_reg(qdev, RX_CFG, data);
931 if (status)
932 goto end;
933
934
935 status =
936 qlge_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
937 if (status)
938 goto end;
939 status =
940 qlge_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
941 if (status)
942 goto end;
943
944
945 qlge_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
946end:
947 qlge_sem_unlock(qdev, qdev->xg_sem_mask);
948 return status;
949}
950
951static inline unsigned int qlge_lbq_block_size(struct qlge_adapter *qdev)
952{
953 return PAGE_SIZE << qdev->lbq_buf_order;
954}
955
956static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)
957{
958 struct qlge_bq_desc *bq_desc;
959
960 bq_desc = &bq->queue[bq->next_to_clean];
961 bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1);
962
963 return bq_desc;
964}
965
966static struct qlge_bq_desc *qlge_get_curr_lchunk(struct qlge_adapter *qdev,
967 struct rx_ring *rx_ring)
968{
969 struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);
970
971 dma_sync_single_for_cpu(&qdev->pdev->dev, lbq_desc->dma_addr,
972 qdev->lbq_buf_size, DMA_FROM_DEVICE);
973
974 if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
975 qlge_lbq_block_size(qdev)) {
976
977 dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
978 qlge_lbq_block_size(qdev), DMA_FROM_DEVICE);
979 }
980
981 return lbq_desc;
982}
983
984
985static void qlge_update_cq(struct rx_ring *rx_ring)
986{
987 rx_ring->cnsmr_idx++;
988 rx_ring->curr_entry++;
989 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
990 rx_ring->cnsmr_idx = 0;
991 rx_ring->curr_entry = rx_ring->cq_base;
992 }
993}
994
995static void qlge_write_cq_idx(struct rx_ring *rx_ring)
996{
997 qlge_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
998}
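
/* The completion queues are consumed with the usual producer/consumer
 * pattern: the chip advances a producer index in a shared memory
 * location, the driver walks entries until its consumer index catches
 * up, then writes the consumer index back through the doorbell.  In
 * sketch form, this is the shape of qlge_clean_inbound_rx_ring()
 * later in this file:
 *
 *	prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
 *	while (prod != rx_ring->cnsmr_idx) {
 *		... handle *rx_ring->curr_entry ...
 *		qlge_update_cq(rx_ring);
 *		prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
 *	}
 *	qlge_write_cq_idx(rx_ring);
 */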
999
1000static const char * const bq_type_name[] = {
1001 [QLGE_SB] = "sbq",
1002 [QLGE_LB] = "lbq",
1003};
1004
1005
1006static int qlge_refill_sb(struct rx_ring *rx_ring,
1007 struct qlge_bq_desc *sbq_desc, gfp_t gfp)
1008{
1009 struct qlge_adapter *qdev = rx_ring->qdev;
1010 struct sk_buff *skb;
1011
1012 if (sbq_desc->p.skb)
1013 return 0;
1014
1015 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1016 "ring %u sbq: getting new skb for index %d.\n",
1017 rx_ring->cq_id, sbq_desc->index);
1018
1019 skb = __netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE, gfp);
1020 if (!skb)
1021 return -ENOMEM;
1022 skb_reserve(skb, QLGE_SB_PAD);
1023
1024 sbq_desc->dma_addr = dma_map_single(&qdev->pdev->dev, skb->data,
1025 SMALL_BUF_MAP_SIZE,
1026 DMA_FROM_DEVICE);
1027 if (dma_mapping_error(&qdev->pdev->dev, sbq_desc->dma_addr)) {
1028 netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
1029 dev_kfree_skb_any(skb);
1030 return -EIO;
1031 }
1032 *sbq_desc->buf_ptr = cpu_to_le64(sbq_desc->dma_addr);
1033
1034 sbq_desc->p.skb = skb;
1035 return 0;
1036}
1037
1038
1039static int qlge_refill_lb(struct rx_ring *rx_ring,
1040 struct qlge_bq_desc *lbq_desc, gfp_t gfp)
1041{
1042 struct qlge_adapter *qdev = rx_ring->qdev;
1043 struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk;
1044
1045 if (!master_chunk->page) {
1046 struct page *page;
1047 dma_addr_t dma_addr;
1048
1049 page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
1050 if (unlikely(!page))
1051 return -ENOMEM;
1052 dma_addr = dma_map_page(&qdev->pdev->dev, page, 0,
1053 qlge_lbq_block_size(qdev),
1054 DMA_FROM_DEVICE);
1055 if (dma_mapping_error(&qdev->pdev->dev, dma_addr)) {
1056 __free_pages(page, qdev->lbq_buf_order);
1057 netif_err(qdev, drv, qdev->ndev,
1058 "PCI mapping failed.\n");
1059 return -EIO;
1060 }
1061 master_chunk->page = page;
1062 master_chunk->va = page_address(page);
1063 master_chunk->offset = 0;
1064 rx_ring->chunk_dma_addr = dma_addr;
1065 }
1066
1067 lbq_desc->p.pg_chunk = *master_chunk;
1068 lbq_desc->dma_addr = rx_ring->chunk_dma_addr;
1069 *lbq_desc->buf_ptr = cpu_to_le64(lbq_desc->dma_addr +
1070 lbq_desc->p.pg_chunk.offset);
1071
	/* Adjust the master page chunk for next
	 * buffer get.
	 */
1075 master_chunk->offset += qdev->lbq_buf_size;
1076 if (master_chunk->offset == qlge_lbq_block_size(qdev)) {
1077 master_chunk->page = NULL;
1078 } else {
1079 master_chunk->va += qdev->lbq_buf_size;
1080 get_page(master_chunk->page);
1081 }
1082
1083 return 0;
1084}
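
/* Large receive buffers are carved out of one "master" page chunk: a
 * single high-order allocation of qlge_lbq_block_size() bytes is handed
 * out qdev->lbq_buf_size bytes at a time, and an extra page reference
 * is taken whenever more chunks remain in the block so the page
 * survives the consumer's put_page().  As an illustrative example (the
 * real values depend on PAGE_SIZE and MTU): with 4 KiB pages,
 * lbq_buf_order == 1 and lbq_buf_size == 2048, one 8 KiB block yields
 * four receive buffers before a new page must be allocated.
 */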
1085
1086
1087static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
1088{
1089 struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
1090 struct qlge_adapter *qdev = rx_ring->qdev;
1091 struct qlge_bq_desc *bq_desc;
1092 int refill_count;
1093 int retval;
1094 int i;
1095
1096 refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -
1097 bq->next_to_use);
1098 if (!refill_count)
1099 return 0;
1100
1101 i = bq->next_to_use;
1102 bq_desc = &bq->queue[i];
1103 i -= QLGE_BQ_LEN;
1104 do {
1105 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1106 "ring %u %s: try cleaning idx %d\n",
1107 rx_ring->cq_id, bq_type_name[bq->type], i);
1108
1109 if (bq->type == QLGE_SB)
1110 retval = qlge_refill_sb(rx_ring, bq_desc, gfp);
1111 else
1112 retval = qlge_refill_lb(rx_ring, bq_desc, gfp);
1113 if (retval < 0) {
1114 netif_err(qdev, ifup, qdev->ndev,
1115 "ring %u %s: Could not get a page chunk, idx %d\n",
1116 rx_ring->cq_id, bq_type_name[bq->type], i);
1117 break;
1118 }
1119
1120 bq_desc++;
1121 i++;
1122 if (unlikely(!i)) {
1123 bq_desc = &bq->queue[0];
1124 i -= QLGE_BQ_LEN;
1125 }
1126 refill_count--;
1127 } while (refill_count);
1128 i += QLGE_BQ_LEN;
1129
1130 if (bq->next_to_use != i) {
1131 if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
1132 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1133 "ring %u %s: updating prod idx = %d.\n",
1134 rx_ring->cq_id, bq_type_name[bq->type],
1135 i);
1136 qlge_write_db_reg(i, bq->prod_idx_db_reg);
1137 }
1138 bq->next_to_use = i;
1139 }
1140
1141 return retval;
1142}
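
/* A note on the index arithmetic in qlge_refill_bq(): 'i' is biased by
 * -QLGE_BQ_LEN so that it reaches zero exactly when the ring wraps,
 * letting the loop test 'if (!i)' instead of comparing against the
 * queue length on every iteration; QLGE_BQ_LEN is added back before the
 * index is stored.  The producer doorbell is only rung when the new
 * index crosses a QLGE_BQ_ALIGN() boundary, so refills are batched
 * rather than written out one buffer at a time.
 */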
1143
1144static void qlge_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
1145 unsigned long delay)
1146{
1147 bool sbq_fail, lbq_fail;
1148
1149 sbq_fail = !!qlge_refill_bq(&rx_ring->sbq, gfp);
1150 lbq_fail = !!qlge_refill_bq(&rx_ring->lbq, gfp);
1151
	/* Minimum number of buffers needed to be able to receive at least one
	 * frame of any format:
	 * sbq: 1 for header + 1 for data
	 * lbq: mtu 9000 / lb size
	 * Below this, the queue might stall.
	 */
1158 if ((sbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->sbq) < 2) ||
1159 (lbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->lbq) <
1160 DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE)))
		/* Allocations can take a long time in certain cases (e.g.
		 * reclaim).  Therefore, use a workqueue for long-running
		 * work items.
		 */
1165 queue_delayed_work_on(smp_processor_id(), system_long_wq,
1166 &rx_ring->refill_work, delay);
1167}
1168
1169static void qlge_slow_refill(struct work_struct *work)
1170{
1171 struct rx_ring *rx_ring = container_of(work, struct rx_ring,
1172 refill_work.work);
1173 struct napi_struct *napi = &rx_ring->napi;
1174
1175 napi_disable(napi);
1176 qlge_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);
1177 napi_enable(napi);
1178
1179 local_bh_disable();
	/* napi_disable() might have prevented incomplete napi work from being
	 * rescheduled.
	 */
1183 napi_schedule(napi);
1184
1185 local_bh_enable();
1186}
1187
1188
1189
1190
1191static void qlge_unmap_send(struct qlge_adapter *qdev,
1192 struct tx_ring_desc *tx_ring_desc, int mapped)
1193{
1194 int i;
1195
1196 for (i = 0; i < mapped; i++) {
1197 if (i == 0 || (i == 7 && mapped > 7)) {
			/* Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
1207 if (i == 7) {
1208 netif_printk(qdev, tx_done, KERN_DEBUG,
1209 qdev->ndev,
1210 "unmapping OAL area.\n");
1211 }
1212 dma_unmap_single(&qdev->pdev->dev,
1213 dma_unmap_addr(&tx_ring_desc->map[i],
1214 mapaddr),
1215 dma_unmap_len(&tx_ring_desc->map[i],
1216 maplen),
1217 DMA_TO_DEVICE);
1218 } else {
1219 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1220 "unmapping frag %d.\n", i);
1221 dma_unmap_page(&qdev->pdev->dev,
1222 dma_unmap_addr(&tx_ring_desc->map[i],
1223 mapaddr),
1224 dma_unmap_len(&tx_ring_desc->map[i],
1225 maplen), DMA_TO_DEVICE);
1226 }
1227 }
1228}
1229
1230
1231
1232
1233static int qlge_map_send(struct qlge_adapter *qdev,
1234 struct qlge_ob_mac_iocb_req *mac_iocb_ptr,
1235 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1236{
1237 int len = skb_headlen(skb);
1238 dma_addr_t map;
1239 int frag_idx, err, map_idx = 0;
1240 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1241 int frag_cnt = skb_shinfo(skb)->nr_frags;
1242
1243 if (frag_cnt) {
1244 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1245 "frag_cnt = %d.\n", frag_cnt);
1246 }
1247
1248
1249
1250 map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
1251
1252 err = dma_mapping_error(&qdev->pdev->dev, map);
1253 if (err) {
1254 netif_err(qdev, tx_queued, qdev->ndev,
1255 "PCI mapping failed with error: %d\n", err);
1256
1257 return NETDEV_TX_BUSY;
1258 }
1259
1260 tbd->len = cpu_to_le32(len);
1261 tbd->addr = cpu_to_le64(map);
1262 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1263 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1264 map_idx++;
1265
	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
1273 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1274 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1275
1276 tbd++;
1277 if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
1297 map = dma_map_single(&qdev->pdev->dev, &tx_ring_desc->oal,
1298 sizeof(struct qlge_oal),
1299 DMA_TO_DEVICE);
1300 err = dma_mapping_error(&qdev->pdev->dev, map);
1301 if (err) {
1302 netif_err(qdev, tx_queued, qdev->ndev,
1303 "PCI mapping outbound address list with error: %d\n",
1304 err);
1305 goto map_error;
1306 }
1307
1308 tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
1314 tbd->len =
1315 cpu_to_le32((sizeof(struct tx_buf_desc) *
1316 (frag_cnt - frag_idx)) | TX_DESC_C);
1317 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1318 map);
1319 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1320 sizeof(struct qlge_oal));
1321 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1322 map_idx++;
1323 }
1324
1325 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1326 DMA_TO_DEVICE);
1327
1328 err = dma_mapping_error(&qdev->pdev->dev, map);
1329 if (err) {
1330 netif_err(qdev, tx_queued, qdev->ndev,
1331 "PCI mapping frags failed with error: %d.\n",
1332 err);
1333 goto map_error;
1334 }
1335
1336 tbd->addr = cpu_to_le64(map);
1337 tbd->len = cpu_to_le32(skb_frag_size(frag));
1338 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1339 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1340 skb_frag_size(frag));
1341 }
1342
1343 tx_ring_desc->map_cnt = map_idx;
1344
1345 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1346 return NETDEV_TX_OK;
1347
1348map_error:
1349
1350
1351
1352
1353
1354
1355 qlge_unmap_send(qdev, tx_ring_desc, map_idx);
1356 return NETDEV_TX_BUSY;
1357}
1358
1359
1360static void qlge_categorize_rx_err(struct qlge_adapter *qdev, u8 rx_err,
1361 struct rx_ring *rx_ring)
1362{
1363 struct nic_stats *stats = &qdev->nic_stats;
1364
1365 stats->rx_err_count++;
1366 rx_ring->rx_errors++;
1367
1368 switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1369 case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1370 stats->rx_code_err++;
1371 break;
1372 case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1373 stats->rx_oversize_err++;
1374 break;
1375 case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1376 stats->rx_undersize_err++;
1377 break;
1378 case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1379 stats->rx_preamble_err++;
1380 break;
1381 case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1382 stats->rx_frame_len_err++;
1383 break;
1384 case IB_MAC_IOCB_RSP_ERR_CRC:
1385 stats->rx_crc_err++;
1386 break;
1387 default:
1388 break;
1389 }
1390}
1391
1392
1393
1394
1395
1396static void qlge_update_mac_hdr_len(struct qlge_adapter *qdev,
1397 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1398 void *page, size_t *len)
1399{
1400 u16 *tags;
1401
1402 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1403 return;
1404 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1405 tags = (u16 *)page;
1406
1407 if (tags[6] == ETH_P_8021Q &&
1408 tags[8] == ETH_P_8021Q)
1409 *len += 2 * VLAN_HLEN;
1410 else
1411 *len += VLAN_HLEN;
1412 }
1413}
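
/* qlge_update_mac_hdr_len() only matters when hardware VLAN stripping
 * is disabled: the chip then leaves any 802.1Q tags in place, so the
 * Ethernet header pulled out of the buffer is longer than ETH_HLEN.
 * The word indices come straight from the frame layout: tags[6] is the
 * 16-bit word at byte offset 12 (the ethertype following the two
 * 6-byte MAC addresses) and tags[8] is at byte offset 16 (the ethertype
 * after one VLAN tag), so a match on both means a double-tagged (QinQ)
 * frame and two VLAN_HLEN's worth of extra header.
 */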
1414
1415
1416static void qlge_process_mac_rx_gro_page(struct qlge_adapter *qdev,
1417 struct rx_ring *rx_ring,
1418 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1419 u32 length, u16 vlan_id)
1420{
1421 struct sk_buff *skb;
1422 struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1423 struct napi_struct *napi = &rx_ring->napi;
1424
1425
1426 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1427 qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1428 put_page(lbq_desc->p.pg_chunk.page);
1429 return;
1430 }
1431 napi->dev = qdev->ndev;
1432
1433 skb = napi_get_frags(napi);
1434 if (!skb) {
1435 netif_err(qdev, drv, qdev->ndev,
1436 "Couldn't get an skb, exiting.\n");
1437 rx_ring->rx_dropped++;
1438 put_page(lbq_desc->p.pg_chunk.page);
1439 return;
1440 }
1441 prefetch(lbq_desc->p.pg_chunk.va);
1442 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1443 lbq_desc->p.pg_chunk.page,
1444 lbq_desc->p.pg_chunk.offset,
1445 length);
1446
1447 skb->len += length;
1448 skb->data_len += length;
1449 skb->truesize += length;
1450 skb_shinfo(skb)->nr_frags++;
1451
1452 rx_ring->rx_packets++;
1453 rx_ring->rx_bytes += length;
1454 skb->ip_summed = CHECKSUM_UNNECESSARY;
1455 skb_record_rx_queue(skb, rx_ring->cq_id);
1456 if (vlan_id != 0xffff)
1457 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1458 napi_gro_frags(napi);
1459}
1460
1461
1462static void qlge_process_mac_rx_page(struct qlge_adapter *qdev,
1463 struct rx_ring *rx_ring,
1464 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1465 u32 length, u16 vlan_id)
1466{
1467 struct net_device *ndev = qdev->ndev;
1468 struct sk_buff *skb = NULL;
1469 void *addr;
1470 struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1471 struct napi_struct *napi = &rx_ring->napi;
1472 size_t hlen = ETH_HLEN;
1473
1474 skb = netdev_alloc_skb(ndev, length);
1475 if (!skb) {
1476 rx_ring->rx_dropped++;
1477 put_page(lbq_desc->p.pg_chunk.page);
1478 return;
1479 }
1480
1481 addr = lbq_desc->p.pg_chunk.va;
1482 prefetch(addr);
1483
1484
1485 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1486 qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1487 goto err_out;
1488 }
1489
1490
1491 qlge_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1492
	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (length > ndev->mtu + hlen) {
		netif_err(qdev, drv, qdev->ndev,
			  "Frame too long, dropping.\n");
1499 rx_ring->rx_dropped++;
1500 goto err_out;
1501 }
1502 skb_put_data(skb, addr, hlen);
1503 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1504 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1505 length);
1506 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1507 lbq_desc->p.pg_chunk.offset + hlen, length - hlen);
1508 skb->len += length - hlen;
1509 skb->data_len += length - hlen;
1510 skb->truesize += length - hlen;
1511
1512 rx_ring->rx_packets++;
1513 rx_ring->rx_bytes += skb->len;
1514 skb->protocol = eth_type_trans(skb, ndev);
1515 skb_checksum_none_assert(skb);
1516
1517 if ((ndev->features & NETIF_F_RXCSUM) &&
1518 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1519
1520 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1521 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1522 "TCP checksum done!\n");
1523 skb->ip_summed = CHECKSUM_UNNECESSARY;
1524 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1525 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1526
1527 struct iphdr *iph =
1528 (struct iphdr *)((u8 *)addr + hlen);
1529 if (!(iph->frag_off &
1530 htons(IP_MF | IP_OFFSET))) {
1531 skb->ip_summed = CHECKSUM_UNNECESSARY;
1532 netif_printk(qdev, rx_status, KERN_DEBUG,
1533 qdev->ndev,
1534 "UDP checksum done!\n");
1535 }
1536 }
1537 }
1538
1539 skb_record_rx_queue(skb, rx_ring->cq_id);
1540 if (vlan_id != 0xffff)
1541 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1542 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1543 napi_gro_receive(napi, skb);
1544 else
1545 netif_receive_skb(skb);
1546 return;
1547err_out:
1548 dev_kfree_skb_any(skb);
1549 put_page(lbq_desc->p.pg_chunk.page);
1550}
1551
1552
1553static void qlge_process_mac_rx_skb(struct qlge_adapter *qdev,
1554 struct rx_ring *rx_ring,
1555 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1556 u32 length, u16 vlan_id)
1557{
1558 struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1559 struct net_device *ndev = qdev->ndev;
1560 struct sk_buff *skb, *new_skb;
1561
1562 skb = sbq_desc->p.skb;
1563
1564 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1565 if (!new_skb) {
1566 rx_ring->rx_dropped++;
1567 return;
1568 }
1569 skb_reserve(new_skb, NET_IP_ALIGN);
1570
1571 dma_sync_single_for_cpu(&qdev->pdev->dev, sbq_desc->dma_addr,
1572 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
1573
1574 skb_put_data(new_skb, skb->data, length);
1575
1576 skb = new_skb;
1577
1578
1579 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1580 qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1581 dev_kfree_skb_any(skb);
1582 return;
1583 }
1584
1585
1586 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1587 qlge_check_lb_frame(qdev, skb);
1588 dev_kfree_skb_any(skb);
1589 return;
1590 }
1591
	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
1595 if (skb->len > ndev->mtu + ETH_HLEN) {
1596 dev_kfree_skb_any(skb);
1597 rx_ring->rx_dropped++;
1598 return;
1599 }
1600
1601 prefetch(skb->data);
1602 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1603 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1604 "%s Multicast.\n",
1605 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1606 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1607 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1608 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1609 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1610 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1611 }
1612 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1613 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1614 "Promiscuous Packet.\n");
1615
1616 rx_ring->rx_packets++;
1617 rx_ring->rx_bytes += skb->len;
1618 skb->protocol = eth_type_trans(skb, ndev);
1619 skb_checksum_none_assert(skb);
1620
1621
1622
1623
1624 if ((ndev->features & NETIF_F_RXCSUM) &&
1625 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1626
1627 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1628 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1629 "TCP checksum done!\n");
1630 skb->ip_summed = CHECKSUM_UNNECESSARY;
1631 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1632 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1633
1634 struct iphdr *iph = (struct iphdr *)skb->data;
1635
1636 if (!(iph->frag_off &
1637 htons(IP_MF | IP_OFFSET))) {
1638 skb->ip_summed = CHECKSUM_UNNECESSARY;
1639 netif_printk(qdev, rx_status, KERN_DEBUG,
1640 qdev->ndev,
1641 "UDP checksum done!\n");
1642 }
1643 }
1644 }
1645
1646 skb_record_rx_queue(skb, rx_ring->cq_id);
1647 if (vlan_id != 0xffff)
1648 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1649 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1650 napi_gro_receive(&rx_ring->napi, skb);
1651 else
1652 netif_receive_skb(skb);
1653}
1654
1655static void qlge_realign_skb(struct sk_buff *skb, int len)
1656{
1657 void *temp_addr = skb->data;
1658
1659
1660
1661
1662
1663 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1664 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1665 memmove(skb->data, temp_addr, len);
1666}
1667
1668
1669
1670
1671
1672
1673static struct sk_buff *qlge_build_rx_skb(struct qlge_adapter *qdev,
1674 struct rx_ring *rx_ring,
1675 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp)
1676{
1677 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1678 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1679 struct qlge_bq_desc *lbq_desc, *sbq_desc;
1680 struct sk_buff *skb = NULL;
1681 size_t hlen = ETH_HLEN;
1682
1683
1684
1685
1686 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1687 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1688 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1689 "Header of %d bytes in small buffer.\n", hdr_len);
1690
1691
1692
1693 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1694 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
1695 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
1696 skb = sbq_desc->p.skb;
1697 qlge_realign_skb(skb, hdr_len);
1698 skb_put(skb, hdr_len);
1699 sbq_desc->p.skb = NULL;
1700 }
1701
1702
1703
1704
1705 if (unlikely(!length)) {
1706 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1707 "No Data buffer in this packet.\n");
1708 return skb;
1709 }
1710
1711 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1712 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1713 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1714 "Headers in small, data of %d bytes in small, combine them.\n",
1715 length);
1716
1717
1718
1719
1720
1721
1722
1723 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1724 dma_sync_single_for_cpu(&qdev->pdev->dev,
1725 sbq_desc->dma_addr,
1726 SMALL_BUF_MAP_SIZE,
1727 DMA_FROM_DEVICE);
1728 skb_put_data(skb, sbq_desc->p.skb->data, length);
1729 } else {
1730 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1731 "%d bytes in a single small buffer.\n",
1732 length);
1733 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1734 skb = sbq_desc->p.skb;
1735 qlge_realign_skb(skb, length);
1736 skb_put(skb, length);
1737 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
1738 SMALL_BUF_MAP_SIZE,
1739 DMA_FROM_DEVICE);
1740 sbq_desc->p.skb = NULL;
1741 }
1742 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1743 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1744 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1745 "Header in small, %d bytes in large. Chain large to small!\n",
1746 length);
1747
1748
1749
1750
1751
1752 lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1753 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1754 "Chaining page at offset = %d, for %d bytes to skb.\n",
1755 lbq_desc->p.pg_chunk.offset, length);
1756 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1757 lbq_desc->p.pg_chunk.offset, length);
1758 skb->len += length;
1759 skb->data_len += length;
1760 skb->truesize += length;
1761 } else {
1762
1763
1764
1765
1766
1767 lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1768 skb = netdev_alloc_skb(qdev->ndev, length);
1769 if (!skb) {
1770 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1771 "No skb available, drop the packet.\n");
1772 return NULL;
1773 }
1774 dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
1775 qdev->lbq_buf_size,
1776 DMA_FROM_DEVICE);
1777 skb_reserve(skb, NET_IP_ALIGN);
1778 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1779 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1780 length);
1781 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1782 lbq_desc->p.pg_chunk.offset,
1783 length);
1784 skb->len += length;
1785 skb->data_len += length;
1786 skb->truesize += length;
1787 qlge_update_mac_hdr_len(qdev, ib_mac_rsp,
1788 lbq_desc->p.pg_chunk.va,
1789 &hlen);
1790 __pskb_pull_tail(skb, hlen);
1791 }
1792 } else {
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804 int size, i = 0;
1805
1806 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1807 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
1808 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
1809 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1820 "%d bytes of headers & data in chain of large.\n",
1821 length);
1822 skb = sbq_desc->p.skb;
1823 sbq_desc->p.skb = NULL;
1824 skb_reserve(skb, NET_IP_ALIGN);
1825 }
1826 do {
1827 lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1828 size = min(length, qdev->lbq_buf_size);
1829
1830 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1831 "Adding page %d to skb for %d bytes.\n",
1832 i, size);
1833 skb_fill_page_desc(skb, i,
1834 lbq_desc->p.pg_chunk.page,
1835 lbq_desc->p.pg_chunk.offset, size);
1836 skb->len += size;
1837 skb->data_len += size;
1838 skb->truesize += size;
1839 length -= size;
1840 i++;
1841 } while (length > 0);
1842 qlge_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1843 &hlen);
1844 __pskb_pull_tail(skb, hlen);
1845 }
1846 return skb;
1847}
1848
1849
1850static void qlge_process_mac_split_rx_intr(struct qlge_adapter *qdev,
1851 struct rx_ring *rx_ring,
1852 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1853 u16 vlan_id)
1854{
1855 struct net_device *ndev = qdev->ndev;
1856 struct sk_buff *skb = NULL;
1857
1858 skb = qlge_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1859 if (unlikely(!skb)) {
1860 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1861 "No skb available, drop packet.\n");
1862 rx_ring->rx_dropped++;
1863 return;
1864 }
1865
1866
1867 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1868 qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1869 dev_kfree_skb_any(skb);
1870 return;
1871 }
1872
	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
1876 if (skb->len > ndev->mtu + ETH_HLEN) {
1877 dev_kfree_skb_any(skb);
1878 rx_ring->rx_dropped++;
1879 return;
1880 }
1881
1882
1883 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1884 qlge_check_lb_frame(qdev, skb);
1885 dev_kfree_skb_any(skb);
1886 return;
1887 }
1888
1889 prefetch(skb->data);
1890 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1891 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1892 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1893 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1894 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1895 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1896 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1897 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1898 rx_ring->rx_multicast++;
1899 }
1900 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1901 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1902 "Promiscuous Packet.\n");
1903 }
1904
1905 skb->protocol = eth_type_trans(skb, ndev);
1906 skb_checksum_none_assert(skb);
1907
1908
1909
1910
1911 if ((ndev->features & NETIF_F_RXCSUM) &&
1912 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1913
1914 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1915 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1916 "TCP checksum done!\n");
1917 skb->ip_summed = CHECKSUM_UNNECESSARY;
1918 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1919 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1920
1921 struct iphdr *iph = (struct iphdr *)skb->data;
1922
1923 if (!(iph->frag_off &
1924 htons(IP_MF | IP_OFFSET))) {
1925 skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
1928 }
1929 }
1930 }
1931
1932 rx_ring->rx_packets++;
1933 rx_ring->rx_bytes += skb->len;
1934 skb_record_rx_queue(skb, rx_ring->cq_id);
1935 if (vlan_id != 0xffff)
1936 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1937 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1938 napi_gro_receive(&rx_ring->napi, skb);
1939 else
1940 netif_receive_skb(skb);
1941}
1942
1943
1944static unsigned long qlge_process_mac_rx_intr(struct qlge_adapter *qdev,
1945 struct rx_ring *rx_ring,
1946 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp)
1947{
1948 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1949 u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1950 (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
1951 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
1952 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
1953
1954 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
1955
1956
1957
1958 qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
1959 vlan_id);
1960 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1961
1962
1963
1964
1965 qlge_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length,
1966 vlan_id);
1967 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
1968 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
1969 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
1970
1971
1972
1973 qlge_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length,
1974 vlan_id);
1975 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1976
1977
1978
1979 qlge_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length,
1980 vlan_id);
1981 } else {
1982
1983
1984
1985 qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
1986 vlan_id);
1987 }
1988
1989 return (unsigned long)length;
1990}
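
/* Dispatch summary for qlge_process_mac_rx_intr(), as implemented above:
 *   - header split out by the chip (RSP_HV)     -> split-frame handler
 *   - data in a small buffer (RSP_DS)           -> copy into a new skb
 *   - data in a large buffer, TCP, csum OK      -> GRO page path
 *   - data in a large buffer (RSP_DL)           -> non-GRO page path
 *   - anything else                             -> split-frame handler
 * The VLAN id is only extracted when the chip stripped a tag and the
 * netdev has NETIF_F_HW_VLAN_CTAG_RX set; 0xffff means "no tag".
 */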
1991
1992
1993static void qlge_process_mac_tx_intr(struct qlge_adapter *qdev,
1994 struct qlge_ob_mac_iocb_rsp *mac_rsp)
1995{
1996 struct tx_ring *tx_ring;
1997 struct tx_ring_desc *tx_ring_desc;
1998
1999 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2000 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2001 qlge_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2002 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2003 tx_ring->tx_packets++;
2004 dev_kfree_skb(tx_ring_desc->skb);
2005 tx_ring_desc->skb = NULL;
2006
2007 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2008 OB_MAC_IOCB_RSP_S |
2009 OB_MAC_IOCB_RSP_L |
2010 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2011 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2012 netif_warn(qdev, tx_done, qdev->ndev,
2013 "Total descriptor length did not match transfer length.\n");
2014 }
2015 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2016 netif_warn(qdev, tx_done, qdev->ndev,
2017 "Frame too short to be valid, not sent.\n");
2018 }
2019 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2020 netif_warn(qdev, tx_done, qdev->ndev,
2021 "Frame too long, but sent anyway.\n");
2022 }
2023 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2024 netif_warn(qdev, tx_done, qdev->ndev,
2025 "PCI backplane error. Frame not sent.\n");
2026 }
2027 }
2028 atomic_inc(&tx_ring->tx_count);
2029}
2030
2031
2032void qlge_queue_fw_error(struct qlge_adapter *qdev)
2033{
2034 qlge_link_off(qdev);
2035 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2036}
2037
2038void qlge_queue_asic_error(struct qlge_adapter *qdev)
2039{
2040 qlge_link_off(qdev);
2041 qlge_disable_interrupts(qdev);
2042
2043
2044
2045
2046 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2047
2048
2049
2050 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2051 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2052}
2053
2054static void qlge_process_chip_ae_intr(struct qlge_adapter *qdev,
2055 struct qlge_ib_ae_iocb_rsp *ib_ae_rsp)
2056{
2057 switch (ib_ae_rsp->event) {
2058 case MGMT_ERR_EVENT:
2059 netif_err(qdev, rx_err, qdev->ndev,
2060 "Management Processor Fatal Error.\n");
2061 qlge_queue_fw_error(qdev);
2062 return;
2063
2064 case CAM_LOOKUP_ERR_EVENT:
2065 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2066 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2067 qlge_queue_asic_error(qdev);
2068 return;
2069
2070 case SOFT_ECC_ERROR_EVENT:
2071 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2072 qlge_queue_asic_error(qdev);
2073 break;
2074
2075 case PCI_ERR_ANON_BUF_RD:
2076 netdev_err(qdev->ndev,
2077 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2078 ib_ae_rsp->q_id);
2079 qlge_queue_asic_error(qdev);
2080 break;
2081
2082 default:
2083 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2084 ib_ae_rsp->event);
2085 qlge_queue_asic_error(qdev);
2086 break;
2087 }
2088}
2089
2090static int qlge_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2091{
2092 struct qlge_adapter *qdev = rx_ring->qdev;
2093 u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
2094 struct qlge_ob_mac_iocb_rsp *net_rsp = NULL;
2095 int count = 0;
2096
2097 struct tx_ring *tx_ring;
2098
2099 while (prod != rx_ring->cnsmr_idx) {
2100 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2101 "cq_id = %d, prod = %d, cnsmr = %d\n",
2102 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2103
2104 net_rsp = (struct qlge_ob_mac_iocb_rsp *)rx_ring->curr_entry;
2105 rmb();
2106 switch (net_rsp->opcode) {
2107 case OPCODE_OB_MAC_TSO_IOCB:
2108 case OPCODE_OB_MAC_IOCB:
2109 qlge_process_mac_tx_intr(qdev, net_rsp);
2110 break;
2111 default:
2112 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2113 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2114 net_rsp->opcode);
2115 }
2116 count++;
2117 qlge_update_cq(rx_ring);
2118 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
2119 }
2120 if (!net_rsp)
2121 return 0;
2122 qlge_write_cq_idx(rx_ring);
2123 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2124 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2125 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
2130 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2131 }
2132
2133 return count;
2134}
2135
2136static int qlge_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2137{
2138 struct qlge_adapter *qdev = rx_ring->qdev;
2139 u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
2140 struct qlge_net_rsp_iocb *net_rsp;
2141 int count = 0;
2142
2143
2144 while (prod != rx_ring->cnsmr_idx) {
2145 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2146 "cq_id = %d, prod = %d, cnsmr = %d\n",
2147 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2148
2149 net_rsp = rx_ring->curr_entry;
2150 rmb();
2151 switch (net_rsp->opcode) {
2152 case OPCODE_IB_MAC_IOCB:
2153 qlge_process_mac_rx_intr(qdev, rx_ring,
2154 (struct qlge_ib_mac_iocb_rsp *)
2155 net_rsp);
2156 break;
2157
2158 case OPCODE_IB_AE_IOCB:
2159 qlge_process_chip_ae_intr(qdev, (struct qlge_ib_ae_iocb_rsp *)
2160 net_rsp);
2161 break;
2162 default:
2163 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2164 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2165 net_rsp->opcode);
2166 break;
2167 }
2168 count++;
2169 qlge_update_cq(rx_ring);
2170 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
2171 if (count == budget)
2172 break;
2173 }
2174 qlge_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
2175 qlge_write_cq_idx(rx_ring);
2176 return count;
2177}
2178
2179static int qlge_napi_poll_msix(struct napi_struct *napi, int budget)
2180{
2181 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2182 struct qlge_adapter *qdev = rx_ring->qdev;
2183 struct rx_ring *trx_ring;
2184 int i, work_done = 0;
2185 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2186
2187 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2188 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2189
2190
2191
2192
2193 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2194 trx_ring = &qdev->rx_ring[i];
2195
2196
2197
2198 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2199 (qlge_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2200 trx_ring->cnsmr_idx)) {
2201 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2202 "%s: Servicing TX completion ring %d.\n",
2203 __func__, trx_ring->cq_id);
2204 qlge_clean_outbound_rx_ring(trx_ring);
2205 }
2206 }
2207
2208
2209
2210
2211 if (qlge_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2212 rx_ring->cnsmr_idx) {
2213 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2214 "%s: Servicing RX completion ring %d.\n",
2215 __func__, rx_ring->cq_id);
2216 work_done = qlge_clean_inbound_rx_ring(rx_ring, budget);
2217 }
2218
2219 if (work_done < budget) {
2220 napi_complete_done(napi, work_done);
2221 qlge_enable_completion_interrupt(qdev, rx_ring->irq);
2222 }
2223 return work_done;
2224}
2225
2226static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2227{
2228 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2229
2230 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2231 qlge_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2232 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2233 } else {
2234 qlge_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2235 }
2236}
2237
/*
 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
 * based on the features to enable/disable hardware vlan accel
 */
2242static int qlge_update_hw_vlan_features(struct net_device *ndev,
2243 netdev_features_t features)
2244{
2245 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2246 bool need_restart = netif_running(ndev);
2247 int status = 0;
2248
2249 if (need_restart) {
2250 status = qlge_adapter_down(qdev);
2251 if (status) {
2252 netif_err(qdev, link, qdev->ndev,
2253 "Failed to bring down the adapter\n");
2254 return status;
2255 }
2256 }
2257
2258
2259 ndev->features = features;
2260
2261 if (need_restart) {
2262 status = qlge_adapter_up(qdev);
2263 if (status) {
2264 netif_err(qdev, link, qdev->ndev,
2265 "Failed to bring up the adapter\n");
2266 return status;
2267 }
2268 }
2269
2270 return status;
2271}
2272
2273static int qlge_set_features(struct net_device *ndev,
2274 netdev_features_t features)
2275{
2276 netdev_features_t changed = ndev->features ^ features;
2277 int err;
2278
2279 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2280
2281 err = qlge_update_hw_vlan_features(ndev, features);
2282 if (err)
2283 return err;
2284
2285 qlge_vlan_mode(ndev, features);
2286 }
2287
2288 return 0;
2289}
2290
2291static int __qlge_vlan_rx_add_vid(struct qlge_adapter *qdev, u16 vid)
2292{
2293 u32 enable_bit = MAC_ADDR_E;
2294 int err;
2295
2296 err = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2297 MAC_ADDR_TYPE_VLAN, vid);
2298 if (err)
2299 netif_err(qdev, ifup, qdev->ndev,
2300 "Failed to init vlan address.\n");
2301 return err;
2302}
2303
2304static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2305{
2306 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2307 int status;
2308 int err;
2309
2310 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2311 if (status)
2312 return status;
2313
2314 err = __qlge_vlan_rx_add_vid(qdev, vid);
2315 set_bit(vid, qdev->active_vlans);
2316
2317 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2318
2319 return err;
2320}
2321
2322static int __qlge_vlan_rx_kill_vid(struct qlge_adapter *qdev, u16 vid)
2323{
2324 u32 enable_bit = 0;
2325 int err;
2326
2327 err = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2328 MAC_ADDR_TYPE_VLAN, vid);
2329 if (err)
2330 netif_err(qdev, ifup, qdev->ndev,
2331 "Failed to clear vlan address.\n");
2332 return err;
2333}
2334
2335static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2336{
2337 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2338 int status;
2339 int err;
2340
2341 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2342 if (status)
2343 return status;
2344
2345 err = __qlge_vlan_rx_kill_vid(qdev, vid);
2346 clear_bit(vid, qdev->active_vlans);
2347
2348 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2349
2350 return err;
2351}
2352
2353static void qlge_restore_vlan(struct qlge_adapter *qdev)
2354{
2355 int status;
2356 u16 vid;
2357
2358 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2359 if (status)
2360 return;
2361
2362 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2363 __qlge_vlan_rx_add_vid(qdev, vid);
2364
2365 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2366}
2367
2368
2369static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2370{
2371 struct rx_ring *rx_ring = dev_id;
2372
2373 napi_schedule(&rx_ring->napi);
2374 return IRQ_HANDLED;
2375}
2376
/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
2382static irqreturn_t qlge_isr(int irq, void *dev_id)
2383{
2384 struct rx_ring *rx_ring = dev_id;
2385 struct qlge_adapter *qdev = rx_ring->qdev;
2386 struct intr_context *intr_context = &qdev->intr_context[0];
2387 u32 var;
2388 int work_done = 0;
2389
 /* Experience shows that when using INTx interrupts, interrupts must
 * be masked manually.
 * When using MSI mode, INTR_EN_EN must be explicitly disabled
 * (even though it is auto-masked), otherwise a later command to
 * enable it is not effective.
 */
2396 if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
2397 qlge_disable_completion_interrupt(qdev, 0);
2398
2399 var = qlge_read32(qdev, STS);
2400
 /*
 * Check for fatal error.
 */
2404 if (var & STS_FE) {
2405 qlge_disable_completion_interrupt(qdev, 0);
2406 qlge_queue_asic_error(qdev);
2407 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2408 var = qlge_read32(qdev, ERR_STS);
2409 netdev_err(qdev->ndev, "Resetting chip. Error Status Register = 0x%x\n", var);
2410 return IRQ_HANDLED;
2411 }
2412
 /*
 * Check MPI processor activity.
 */
2416 if ((var & STS_PI) &&
2417 (qlge_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
 /*
 * We've got an async event or mailbox completion.
 * Handle it and clear the source of the interrupt.
 */
2422 netif_err(qdev, intr, qdev->ndev,
2423 "Got MPI processor interrupt.\n");
2424 qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2425 queue_delayed_work_on(smp_processor_id(),
2426 qdev->workqueue, &qdev->mpi_work, 0);
2427 work_done++;
2428 }
2429
 /*
 * Get the bit-mask that shows the active queues for this
 * pass.  Compare it to the queues that this irq services
 * and call napi if there's a match.
 */
2435 var = qlge_read32(qdev, ISR1);
2436 if (var & intr_context->irq_mask) {
2437 netif_info(qdev, intr, qdev->ndev,
2438 "Waking handler for rx_ring[0].\n");
2439 napi_schedule(&rx_ring->napi);
2440 work_done++;
2441 } else {
 /* None of the queues serviced by this vector had any work
 * pending, so NAPI was not scheduled.  Re-enable the
 * interrupt here since nobody else will do it for us.
 */
2448 qlge_enable_completion_interrupt(qdev, 0);
2449 }
2450
2451 return work_done ? IRQ_HANDLED : IRQ_NONE;
2452}
2453
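/* Build the TSO portion of the outbound IOCB.  Returns 1 if the frame
 * was set up for TSO, 0 if it is not a GSO frame, or a negative errno
 * if the headers could not be made writable.
 */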
2454static int qlge_tso(struct sk_buff *skb, struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr)
2455{
2456 if (skb_is_gso(skb)) {
2457 int err;
2458 __be16 l3_proto = vlan_get_protocol(skb);
2459
2460 err = skb_cow_head(skb, 0);
2461 if (err < 0)
2462 return err;
2463
2464 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2465 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2466 mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
2467 mac_iocb_ptr->total_hdrs_len =
2468 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2469 mac_iocb_ptr->net_trans_offset =
2470 cpu_to_le16(skb_network_offset(skb) |
2471 skb_transport_offset(skb)
2472 << OB_MAC_TRANSPORT_HDR_SHIFT);
2473 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2474 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2475 if (likely(l3_proto == htons(ETH_P_IP))) {
2476 struct iphdr *iph = ip_hdr(skb);
2477
2478 iph->check = 0;
2479 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2480 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2481 iph->daddr, 0,
2482 IPPROTO_TCP,
2483 0);
2484 } else if (l3_proto == htons(ETH_P_IPV6)) {
2485 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2486 tcp_hdr(skb)->check =
2487 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2488 &ipv6_hdr(skb)->daddr,
2489 0, IPPROTO_TCP, 0);
2490 }
2491 return 1;
2492 }
2493 return 0;
2494}
2495
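/* Set up hardware TX checksum offload for a non-TSO IPv4 TCP or UDP
 * frame: the pseudo-header checksum is seeded into the packet and the
 * IOCB flags tell the chip which transport checksum to insert.
 */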
2496static void qlge_hw_csum_setup(struct sk_buff *skb,
2497 struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr)
2498{
2499 int len;
2500 struct iphdr *iph = ip_hdr(skb);
2501 __sum16 *check;
2502
2503 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2504 mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
2505 mac_iocb_ptr->net_trans_offset =
2506 cpu_to_le16(skb_network_offset(skb) |
2507 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2508
2509 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2510 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2511 if (likely(iph->protocol == IPPROTO_TCP)) {
2512 check = &(tcp_hdr(skb)->check);
2513 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2514 mac_iocb_ptr->total_hdrs_len =
2515 cpu_to_le16(skb_transport_offset(skb) +
2516 (tcp_hdr(skb)->doff << 2));
2517 } else {
2518 check = &(udp_hdr(skb)->check);
2519 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2520 mac_iocb_ptr->total_hdrs_len =
2521 cpu_to_le16(skb_transport_offset(skb) +
2522 sizeof(struct udphdr));
2523 }
2524 *check = ~csum_tcpudp_magic(iph->saddr,
2525 iph->daddr, len, iph->protocol, 0);
2526}
2527
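/* The ndo_start_xmit handler.  Builds a MAC IOCB (optionally with TSO
 * or checksum offload), maps the skb, bumps the producer index and
 * rings the TX doorbell.  The queue is stopped when fewer than two
 * descriptors remain.
 */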
2528static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2529{
2530 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2531 struct qlge_ob_mac_iocb_req *mac_iocb_ptr;
2532 struct tx_ring_desc *tx_ring_desc;
2533 int tso;
2534 struct tx_ring *tx_ring;
2535 u32 tx_ring_idx = (u32)skb->queue_mapping;
2536
2537 tx_ring = &qdev->tx_ring[tx_ring_idx];
2538
2539 if (skb_padto(skb, ETH_ZLEN))
2540 return NETDEV_TX_OK;
2541
2542 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2543 netif_info(qdev, tx_queued, qdev->ndev,
2544 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2545 __func__, tx_ring_idx);
2546 netif_stop_subqueue(ndev, tx_ring->wq_id);
2547 tx_ring->tx_errors++;
2548 return NETDEV_TX_BUSY;
2549 }
2550 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2551 mac_iocb_ptr = tx_ring_desc->queue_entry;
2552 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2553
2554 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2555 mac_iocb_ptr->tid = tx_ring_desc->index;
 /* We use the upper 32-bits to store the tx queue for this IO.
 * When we get the completion we can use it to establish the context.
 */
2559 mac_iocb_ptr->txq_idx = tx_ring_idx;
2560 tx_ring_desc->skb = skb;
2561
2562 mac_iocb_ptr->frame_len = cpu_to_le16((u16)skb->len);
2563
2564 if (skb_vlan_tag_present(skb)) {
2565 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2566 "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2567 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2568 mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2569 }
2570 tso = qlge_tso(skb, (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr);
2571 if (tso < 0) {
2572 dev_kfree_skb_any(skb);
2573 return NETDEV_TX_OK;
2574 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2575 qlge_hw_csum_setup(skb,
2576 (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr);
2577 }
2578 if (qlge_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2579 NETDEV_TX_OK) {
2580 netif_err(qdev, tx_queued, qdev->ndev,
2581 "Could not map the segments.\n");
2582 tx_ring->tx_errors++;
2583 return NETDEV_TX_BUSY;
2584 }
2585
2586 tx_ring->prod_idx++;
2587 if (tx_ring->prod_idx == tx_ring->wq_len)
2588 tx_ring->prod_idx = 0;
2589 wmb();
2590
2591 qlge_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2592 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2593 "tx queued, slot %d, len %d\n",
2594 tx_ring->prod_idx, skb->len);
2595
2596 atomic_dec(&tx_ring->tx_count);
2597
2598 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2599 netif_stop_subqueue(ndev, tx_ring->wq_id);
2600 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
 /*
 * The queue got stopped because the tx_ring was full.
 * Wake it up, because it's now at least 25% empty.
 */
2605 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2606 }
2607 return NETDEV_TX_OK;
2608}
2609
2610static void qlge_free_shadow_space(struct qlge_adapter *qdev)
2611{
2612 if (qdev->rx_ring_shadow_reg_area) {
2613 dma_free_coherent(&qdev->pdev->dev,
2614 PAGE_SIZE,
2615 qdev->rx_ring_shadow_reg_area,
2616 qdev->rx_ring_shadow_reg_dma);
2617 qdev->rx_ring_shadow_reg_area = NULL;
2618 }
2619 if (qdev->tx_ring_shadow_reg_area) {
2620 dma_free_coherent(&qdev->pdev->dev,
2621 PAGE_SIZE,
2622 qdev->tx_ring_shadow_reg_area,
2623 qdev->tx_ring_shadow_reg_dma);
2624 qdev->tx_ring_shadow_reg_area = NULL;
2625 }
2626}
2627
2628static int qlge_alloc_shadow_space(struct qlge_adapter *qdev)
2629{
2630 qdev->rx_ring_shadow_reg_area =
2631 dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
2632 &qdev->rx_ring_shadow_reg_dma, GFP_ATOMIC);
2633 if (!qdev->rx_ring_shadow_reg_area) {
2634 netif_err(qdev, ifup, qdev->ndev,
2635 "Allocation of RX shadow space failed.\n");
2636 return -ENOMEM;
2637 }
2638
2639 qdev->tx_ring_shadow_reg_area =
2640 dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
2641 &qdev->tx_ring_shadow_reg_dma, GFP_ATOMIC);
2642 if (!qdev->tx_ring_shadow_reg_area) {
2643 netif_err(qdev, ifup, qdev->ndev,
2644 "Allocation of TX shadow space failed.\n");
2645 goto err_wqp_sh_area;
2646 }
2647 return 0;
2648
2649err_wqp_sh_area:
2650 dma_free_coherent(&qdev->pdev->dev,
2651 PAGE_SIZE,
2652 qdev->rx_ring_shadow_reg_area,
2653 qdev->rx_ring_shadow_reg_dma);
2654 return -ENOMEM;
2655}
2656
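/* Walk the work queue and link every software descriptor to its
 * hardware IOCB slot, then mark the whole ring as free.
 */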
2657static void qlge_init_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring)
2658{
2659 struct tx_ring_desc *tx_ring_desc;
2660 int i;
2661 struct qlge_ob_mac_iocb_req *mac_iocb_ptr;
2662
2663 mac_iocb_ptr = tx_ring->wq_base;
2664 tx_ring_desc = tx_ring->q;
2665 for (i = 0; i < tx_ring->wq_len; i++) {
2666 tx_ring_desc->index = i;
2667 tx_ring_desc->skb = NULL;
2668 tx_ring_desc->queue_entry = mac_iocb_ptr;
2669 mac_iocb_ptr++;
2670 tx_ring_desc++;
2671 }
2672 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2673}
2674
2675static void qlge_free_tx_resources(struct qlge_adapter *qdev,
2676 struct tx_ring *tx_ring)
2677{
2678 if (tx_ring->wq_base) {
2679 dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2680 tx_ring->wq_base, tx_ring->wq_base_dma);
2681 tx_ring->wq_base = NULL;
2682 }
2683 kfree(tx_ring->q);
2684 tx_ring->q = NULL;
2685}
2686
2687static int qlge_alloc_tx_resources(struct qlge_adapter *qdev,
2688 struct tx_ring *tx_ring)
2689{
2690 tx_ring->wq_base =
2691 dma_alloc_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2692 &tx_ring->wq_base_dma, GFP_ATOMIC);
2693
2694 if (!tx_ring->wq_base ||
2695 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2696 goto pci_alloc_err;
2697
2698 tx_ring->q =
2699 kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
2700 GFP_KERNEL);
2701 if (!tx_ring->q)
2702 goto err;
2703
2704 return 0;
2705err:
2706 dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2707 tx_ring->wq_base, tx_ring->wq_base_dma);
2708 tx_ring->wq_base = NULL;
2709pci_alloc_err:
2710 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2711 return -ENOMEM;
2712}
2713
2714static void qlge_free_lbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
2715{
2716 struct qlge_bq *lbq = &rx_ring->lbq;
2717 unsigned int last_offset;
2718
2719 last_offset = qlge_lbq_block_size(qdev) - qdev->lbq_buf_size;
2720 while (lbq->next_to_clean != lbq->next_to_use) {
2721 struct qlge_bq_desc *lbq_desc =
2722 &lbq->queue[lbq->next_to_clean];
2723
2724 if (lbq_desc->p.pg_chunk.offset == last_offset)
2725 dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
2726 qlge_lbq_block_size(qdev),
2727 DMA_FROM_DEVICE);
2728 put_page(lbq_desc->p.pg_chunk.page);
2729
2730 lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
2731 }
2732
2733 if (rx_ring->master_chunk.page) {
2734 dma_unmap_page(&qdev->pdev->dev, rx_ring->chunk_dma_addr,
2735 qlge_lbq_block_size(qdev), DMA_FROM_DEVICE);
2736 put_page(rx_ring->master_chunk.page);
2737 rx_ring->master_chunk.page = NULL;
2738 }
2739}
2740
2741static void qlge_free_sbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
2742{
2743 int i;
2744
2745 for (i = 0; i < QLGE_BQ_LEN; i++) {
2746 struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i];
2747
2748 if (!sbq_desc) {
2749 netif_err(qdev, ifup, qdev->ndev,
2750 "sbq_desc %d is NULL.\n", i);
2751 return;
2752 }
2753 if (sbq_desc->p.skb) {
2754 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
2755 SMALL_BUF_MAP_SIZE,
2756 DMA_FROM_DEVICE);
2757 dev_kfree_skb(sbq_desc->p.skb);
2758 sbq_desc->p.skb = NULL;
2759 }
2760 }
2761}
2762
/* Free the RX page chunks and SKBs posted to the large and small
 * buffer queues of every rx_ring.
 */
2766static void qlge_free_rx_buffers(struct qlge_adapter *qdev)
2767{
2768 int i;
2769
2770 for (i = 0; i < qdev->rx_ring_count; i++) {
2771 struct rx_ring *rx_ring = &qdev->rx_ring[i];
2772
2773 if (rx_ring->lbq.queue)
2774 qlge_free_lbq_buffers(qdev, rx_ring);
2775 if (rx_ring->sbq.queue)
2776 qlge_free_sbq_buffers(qdev, rx_ring);
2777 }
2778}
2779
2780static void qlge_alloc_rx_buffers(struct qlge_adapter *qdev)
2781{
2782 int i;
2783
2784 for (i = 0; i < qdev->rss_ring_count; i++)
2785 qlge_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL,
2786 HZ / 2);
2787}
2788
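/* Allocate the DMA area that holds the buffer queue entries and the
 * array of software descriptors that shadow them, and point each
 * descriptor at its hardware entry.  Cleanup on failure is left to
 * qlge_free_rx_resources().
 */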
2789static int qlge_init_bq(struct qlge_bq *bq)
2790{
2791 struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
2792 struct qlge_adapter *qdev = rx_ring->qdev;
2793 struct qlge_bq_desc *bq_desc;
2794 __le64 *buf_ptr;
2795 int i;
2796
2797 bq->base = dma_alloc_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2798 &bq->base_dma, GFP_ATOMIC);
2799 if (!bq->base)
2800 return -ENOMEM;
2801
2802 bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
2803 GFP_KERNEL);
2804 if (!bq->queue)
2805 return -ENOMEM;
2806
2807 buf_ptr = bq->base;
2808 bq_desc = &bq->queue[0];
2809 for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) {
2810 bq_desc->p.skb = NULL;
2811 bq_desc->index = i;
2812 bq_desc->buf_ptr = buf_ptr;
2813 }
2814
2815 return 0;
2816}
2817
2818static void qlge_free_rx_resources(struct qlge_adapter *qdev,
2819 struct rx_ring *rx_ring)
2820{
2821
2822 if (rx_ring->sbq.base) {
2823 dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2824 rx_ring->sbq.base, rx_ring->sbq.base_dma);
2825 rx_ring->sbq.base = NULL;
2826 }
2827
2828
2829 kfree(rx_ring->sbq.queue);
2830 rx_ring->sbq.queue = NULL;
2831
2832
2833 if (rx_ring->lbq.base) {
2834 dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2835 rx_ring->lbq.base, rx_ring->lbq.base_dma);
2836 rx_ring->lbq.base = NULL;
2837 }
2838
2839
2840 kfree(rx_ring->lbq.queue);
2841 rx_ring->lbq.queue = NULL;
2842
2843
2844 if (rx_ring->cq_base) {
2845 dma_free_coherent(&qdev->pdev->dev,
2846 rx_ring->cq_size,
2847 rx_ring->cq_base, rx_ring->cq_base_dma);
2848 rx_ring->cq_base = NULL;
2849 }
2850}
2851
/* Allocate queues and buffers for this completions queue based
 * on the values in the parameter structure.
 */
2855static int qlge_alloc_rx_resources(struct qlge_adapter *qdev,
2856 struct rx_ring *rx_ring)
2857{
 /*
 * Allocate the completion queue for this rx_ring.
 */
2861 rx_ring->cq_base =
2862 dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size,
2863 &rx_ring->cq_base_dma, GFP_ATOMIC);
2864
2865 if (!rx_ring->cq_base) {
2866 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2867 return -ENOMEM;
2868 }
2869
2870 if (rx_ring->cq_id < qdev->rss_ring_count &&
2871 (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
2872 qlge_free_rx_resources(qdev, rx_ring);
2873 return -ENOMEM;
2874 }
2875
2876 return 0;
2877}
2878
2879static void qlge_tx_ring_clean(struct qlge_adapter *qdev)
2880{
2881 struct tx_ring *tx_ring;
2882 struct tx_ring_desc *tx_ring_desc;
2883 int i, j;
2884
 /*
 * Loop through all queues and free
 * any resources.
 */
2889 for (j = 0; j < qdev->tx_ring_count; j++) {
2890 tx_ring = &qdev->tx_ring[j];
2891 for (i = 0; i < tx_ring->wq_len; i++) {
2892 tx_ring_desc = &tx_ring->q[i];
2893 if (tx_ring_desc && tx_ring_desc->skb) {
2894 netif_err(qdev, ifdown, qdev->ndev,
2895 "Freeing lost SKB %p, from queue %d, index %d.\n",
2896 tx_ring_desc->skb, j,
2897 tx_ring_desc->index);
2898 qlge_unmap_send(qdev, tx_ring_desc,
2899 tx_ring_desc->map_cnt);
2900 dev_kfree_skb(tx_ring_desc->skb);
2901 tx_ring_desc->skb = NULL;
2902 }
2903 }
2904 }
2905}
2906
2907static void qlge_free_mem_resources(struct qlge_adapter *qdev)
2908{
2909 int i;
2910
2911 for (i = 0; i < qdev->tx_ring_count; i++)
2912 qlge_free_tx_resources(qdev, &qdev->tx_ring[i]);
2913 for (i = 0; i < qdev->rx_ring_count; i++)
2914 qlge_free_rx_resources(qdev, &qdev->rx_ring[i]);
2915 qlge_free_shadow_space(qdev);
2916}
2917
2918static int qlge_alloc_mem_resources(struct qlge_adapter *qdev)
2919{
2920 int i;
2921
2922
2923 if (qlge_alloc_shadow_space(qdev))
2924 return -ENOMEM;
2925
2926 for (i = 0; i < qdev->rx_ring_count; i++) {
2927 if (qlge_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2928 netif_err(qdev, ifup, qdev->ndev,
2929 "RX resource allocation failed.\n");
2930 goto err_mem;
2931 }
2932 }
2933
2934 for (i = 0; i < qdev->tx_ring_count; i++) {
2935 if (qlge_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2936 netif_err(qdev, ifup, qdev->ndev,
2937 "TX resource allocation failed.\n");
2938 goto err_mem;
2939 }
2940 }
2941 return 0;
2942
2943err_mem:
2944 qlge_free_mem_resources(qdev);
2945 return -ENOMEM;
2946}
2947
/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
2952static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
2953{
2954 struct cqicb *cqicb = &rx_ring->cqicb;
2955 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2956 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2957 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2958 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2959 void __iomem *doorbell_area =
2960 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2961 int err = 0;
2962 u64 tmp;
2963 __le64 *base_indirect_ptr;
2964 int page_entries;
2965
2966
2967 rx_ring->prod_idx_sh_reg = shadow_reg;
2968 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2969 *rx_ring->prod_idx_sh_reg = 0;
2970 shadow_reg += sizeof(u64);
2971 shadow_reg_dma += sizeof(u64);
2972 rx_ring->lbq.base_indirect = shadow_reg;
2973 rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
2974 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
2975 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
2976 rx_ring->sbq.base_indirect = shadow_reg;
2977 rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
2978
2979
2980 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *)doorbell_area;
2981 rx_ring->cnsmr_idx = 0;
2982 rx_ring->curr_entry = rx_ring->cq_base;
2983
2984
2985 rx_ring->valid_db_reg = doorbell_area + 0x04;
2986
2987
2988 rx_ring->lbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x18);
2989
2990
2991 rx_ring->sbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x1c);
2992
2993 memset((void *)cqicb, 0, sizeof(struct cqicb));
2994 cqicb->msix_vect = rx_ring->irq;
2995
2996 cqicb->len = cpu_to_le16(QLGE_FIT16(rx_ring->cq_len) | LEN_V |
2997 LEN_CPP_CONT);
2998
2999 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3000
3001 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3002
 /*
 * Set up the control block load flags.
 */
3006 cqicb->flags = FLAGS_LC |
3007 FLAGS_LV |
3008 FLAGS_LI;
3009 if (rx_ring->cq_id < qdev->rss_ring_count) {
3010 cqicb->flags |= FLAGS_LL;
3011 tmp = (u64)rx_ring->lbq.base_dma;
3012 base_indirect_ptr = rx_ring->lbq.base_indirect;
3013 page_entries = 0;
3014 do {
3015 *base_indirect_ptr = cpu_to_le64(tmp);
3016 tmp += DB_PAGE_SIZE;
3017 base_indirect_ptr++;
3018 page_entries++;
3019 } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3020 cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
3021 cqicb->lbq_buf_size =
3022 cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
3023 cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3024 rx_ring->lbq.next_to_use = 0;
3025 rx_ring->lbq.next_to_clean = 0;
3026
3027 cqicb->flags |= FLAGS_LS;
3028 tmp = (u64)rx_ring->sbq.base_dma;
3029 base_indirect_ptr = rx_ring->sbq.base_indirect;
3030 page_entries = 0;
3031 do {
3032 *base_indirect_ptr = cpu_to_le64(tmp);
3033 tmp += DB_PAGE_SIZE;
3034 base_indirect_ptr++;
3035 page_entries++;
3036 } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3037 cqicb->sbq_addr =
3038 cpu_to_le64(rx_ring->sbq.base_indirect_dma);
3039 cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
3040 cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3041 rx_ring->sbq.next_to_use = 0;
3042 rx_ring->sbq.next_to_clean = 0;
3043 }
3044 if (rx_ring->cq_id < qdev->rss_ring_count) {
 /* Inbound completion handling rx_rings run in
 * separate NAPI contexts.
 */
3048 netif_napi_add(qdev->ndev, &rx_ring->napi, qlge_napi_poll_msix,
3049 64);
3050 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3051 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3052 } else {
3053 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3054 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3055 }
3056 err = qlge_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3057 CFG_LCQ, rx_ring->cq_id);
3058 if (err) {
3059 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3060 return err;
3061 }
3062 return err;
3063}
3064
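/* Build the work queue initialization control block (wqicb) for one
 * TX ring, wire up its doorbell and shadow consumer index, and load
 * it into the chip with qlge_write_cfg().
 */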
3065static int qlge_start_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring)
3066{
3067 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3068 void __iomem *doorbell_area =
3069 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3070 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3071 (tx_ring->wq_id * sizeof(u64));
3072 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3073 (tx_ring->wq_id * sizeof(u64));
3074 int err = 0;
3075
 /*
 * Assign doorbell registers for this tx_ring.
 */
 /* TX PCI doorbell mem area for tx producer index */
3080 tx_ring->prod_idx_db_reg = (u32 __iomem *)doorbell_area;
3081 tx_ring->prod_idx = 0;
3082
3083 tx_ring->valid_db_reg = doorbell_area + 0x04;
3084
 /*
 * Assign shadow registers for this tx_ring.
 */
3088 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3089 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3090
3091 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3092 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3093 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3094 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3095 wqicb->rid = 0;
3096 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3097
3098 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3099
3100 qlge_init_tx_ring(qdev, tx_ring);
3101
3102 err = qlge_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3103 (u16)tx_ring->wq_id);
3104 if (err) {
3105 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3106 return err;
3107 }
3108 return err;
3109}
3110
3111static void qlge_disable_msix(struct qlge_adapter *qdev)
3112{
3113 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3114 pci_disable_msix(qdev->pdev);
3115 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3116 kfree(qdev->msi_x_entry);
3117 qdev->msi_x_entry = NULL;
3118 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3119 pci_disable_msi(qdev->pdev);
3120 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3121 }
3122}
3123
/* We start by trying to get the number of vectors
 * stored in qdev->intr_count.  If we don't get that
 * number then we fall back to MSI or legacy interrupts.
 */
3128static void qlge_enable_msix(struct qlge_adapter *qdev)
3129{
3130 int i, err;
3131
3132
3133 if (qlge_irq_type == MSIX_IRQ) {
 /* Try to alloc space for the msix struct,
 * if it fails then go to MSI/legacy.
 */
3137 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3138 sizeof(struct msix_entry),
3139 GFP_KERNEL);
3140 if (!qdev->msi_x_entry) {
3141 qlge_irq_type = MSI_IRQ;
3142 goto msi;
3143 }
3144
3145 for (i = 0; i < qdev->intr_count; i++)
3146 qdev->msi_x_entry[i].entry = i;
3147
3148 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3149 1, qdev->intr_count);
3150 if (err < 0) {
3151 kfree(qdev->msi_x_entry);
3152 qdev->msi_x_entry = NULL;
3153 netif_warn(qdev, ifup, qdev->ndev,
3154 "MSI-X Enable failed, trying MSI.\n");
3155 qlge_irq_type = MSI_IRQ;
3156 } else {
3157 qdev->intr_count = err;
3158 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3159 netif_info(qdev, ifup, qdev->ndev,
3160 "MSI-X Enabled, got %d vectors.\n",
3161 qdev->intr_count);
3162 return;
3163 }
3164 }
3165msi:
3166 qdev->intr_count = 1;
3167 if (qlge_irq_type == MSI_IRQ) {
3168 if (pci_alloc_irq_vectors(qdev->pdev, 1, 1, PCI_IRQ_MSI) >= 0) {
3169 set_bit(QL_MSI_ENABLED, &qdev->flags);
3170 netif_info(qdev, ifup, qdev->ndev,
3171 "Running with MSI interrupts.\n");
3172 return;
3173 }
3174 }
3175 qlge_irq_type = LEG_IRQ;
3176 set_bit(QL_LEGACY_ENABLED, &qdev->flags);
3177 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3178 "Running with legacy interrupts.\n");
3179}
3180
/* Each vector services 1 RSS ring and 1 or more
 * TX completion rings.  This function loops through
 * the TX completion rings and assigns the vector that
 * will service it.  An example would be if there are
 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
 * This would mean that vector 0 would service RSS ring 0
 * and TX completion rings 0,1,2 and 3.  Vector 1 would
 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
 */
3190static void qlge_set_tx_vect(struct qlge_adapter *qdev)
3191{
3192 int i, j, vect;
3193 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3194
3195 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3196
3197 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3198 i < qdev->rx_ring_count; i++) {
3199 if (j == tx_rings_per_vector) {
3200 vect++;
3201 j = 0;
3202 }
3203 qdev->rx_ring[i].irq = vect;
3204 j++;
3205 }
3206 } else {
 /* For single vector all rings have an irq
 * of zero.
 */
3210 for (i = 0; i < qdev->rx_ring_count; i++)
3211 qdev->rx_ring[i].irq = 0;
3212 }
3213}
3214
/* Set the interrupt mask for this vector.  Each vector
 * will service 1 RSS ring and 1 or more
 * TX completion rings.  This function sets up a bit mask
 * per vector that indicates which rings it services.
 */
3220static void qlge_set_irq_mask(struct qlge_adapter *qdev, struct intr_context *ctx)
3221{
3222 int j, vect = ctx->intr;
3223 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3224
3225 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
 /* Add the RSS ring serviced by this vector
 * to the mask.
 */
3229 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
 /* Add the TX ring(s) serviced by this vector
 * to the mask.
 */
3233 for (j = 0; j < tx_rings_per_vector; j++) {
3234 ctx->irq_mask |=
3235 (1 << qdev->rx_ring[qdev->rss_ring_count +
3236 (vect * tx_rings_per_vector) + j].cq_id);
3237 }
3238 } else {
 /* For single vector we just shift each queue's
 * ID into the mask.
 */
3242 for (j = 0; j < qdev->rx_ring_count; j++)
3243 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3244 }
3245}
3246
/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
3253static void qlge_resolve_queues_to_irqs(struct qlge_adapter *qdev)
3254{
3255 int i = 0;
3256 struct intr_context *intr_context = &qdev->intr_context[0];
3257
3258 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
 /* Each rx_ring has its
 * own intr_context since we have separate
 * vectors for each queue.
 */
3263 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3264 qdev->rx_ring[i].irq = i;
3265 intr_context->intr = i;
3266 intr_context->qdev = qdev;
 /* Set up this vector's bit-mask that indicates
 * which queues it services.
 */
3270 qlge_set_irq_mask(qdev, intr_context);
3271
 /* We set up each vector's enable/disable/read bits so
 * there's no bit/mask calculations in the critical path.
 */
3275 intr_context->intr_en_mask =
3276 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3277 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3278 | i;
3279 intr_context->intr_dis_mask =
3280 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3281 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3282 INTR_EN_IHD | i;
3283 intr_context->intr_read_mask =
3284 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3285 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3286 i;
3287 if (i == 0) {
 /* The first vector/queue handles
 * broadcast/multicast, fatal errors,
 * and firmware events.  This in addition
 * to normal inbound NAPI processing.
 */
3293 intr_context->handler = qlge_isr;
3294 sprintf(intr_context->name, "%s-rx-%d",
3295 qdev->ndev->name, i);
3296 } else {
 /*
 * Inbound queues handle unicast frames only.
 */
3300 intr_context->handler = qlge_msix_rx_isr;
3301 sprintf(intr_context->name, "%s-rx-%d",
3302 qdev->ndev->name, i);
3303 }
3304 }
3305 } else {
 /*
 * All rx_rings use the same intr_context since
 * there is only one vector.
 */
3310 intr_context->intr = 0;
3311 intr_context->qdev = qdev;
3312
 /*
 * We set up each vector's enable/disable/read bits so
 * there's no bit/mask calculations in the critical path.
 */
3316 intr_context->intr_en_mask =
3317 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3318 intr_context->intr_dis_mask =
3319 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3320 INTR_EN_TYPE_DISABLE;
3321 if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) {
 /* Experience shows that when using INTx interrupts,
 * the device does not always auto-mask INTR_EN_EN.
 * Moreover, masking INTR_EN_EN manually does not
 * immediately prevent interrupt generation.
 */
3327 intr_context->intr_en_mask |= INTR_EN_EI << 16 |
3328 INTR_EN_EI;
3329 intr_context->intr_dis_mask |= INTR_EN_EI << 16;
3330 }
3331 intr_context->intr_read_mask =
3332 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
 /*
 * Single interrupt means one handler for all rings.
 */
3336 intr_context->handler = qlge_isr;
3337 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3338
 /* Set up this vector's bit-mask that indicates
 * which queues it services.  In this case there is
 * a single vector so it will service all RSS and
 * TX completion rings.
 */
3343 qlge_set_irq_mask(qdev, intr_context);
3344 }
 /* Tell the TX completion rings which MSI-X vector
 * they will be using.
 */
3348 qlge_set_tx_vect(qdev);
3349}
3350
3351static void qlge_free_irq(struct qlge_adapter *qdev)
3352{
3353 int i;
3354 struct intr_context *intr_context = &qdev->intr_context[0];
3355
3356 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3357 if (intr_context->hooked) {
3358 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3359 free_irq(qdev->msi_x_entry[i].vector,
3360 &qdev->rx_ring[i]);
3361 } else {
3362 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3363 }
3364 }
3365 }
3366 qlge_disable_msix(qdev);
3367}
3368
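/* Hook the interrupt handlers chosen by qlge_resolve_queues_to_irqs():
 * one handler per MSI-X vector, or a single (possibly shared) handler
 * for MSI/legacy interrupts.
 */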
3369static int qlge_request_irq(struct qlge_adapter *qdev)
3370{
3371 int i;
3372 int status = 0;
3373 struct pci_dev *pdev = qdev->pdev;
3374 struct intr_context *intr_context = &qdev->intr_context[0];
3375
3376 qlge_resolve_queues_to_irqs(qdev);
3377
3378 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3379 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3380 status = request_irq(qdev->msi_x_entry[i].vector,
3381 intr_context->handler,
3382 0,
3383 intr_context->name,
3384 &qdev->rx_ring[i]);
3385 if (status) {
3386 netif_err(qdev, ifup, qdev->ndev,
3387 "Failed request for MSIX interrupt %d.\n",
3388 i);
3389 goto err_irq;
3390 }
3391 } else {
3392 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3393 "trying msi or legacy interrupts.\n");
3394 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3395 "%s: irq = %d.\n", __func__, pdev->irq);
3396 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3397 "%s: context->name = %s.\n", __func__,
3398 intr_context->name);
3399 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3400 "%s: dev_id = 0x%p.\n", __func__,
3401 &qdev->rx_ring[0]);
3402 status =
3403 request_irq(pdev->irq, qlge_isr,
3404 test_bit(QL_MSI_ENABLED, &qdev->flags)
3405 ? 0
3406 : IRQF_SHARED,
3407 intr_context->name, &qdev->rx_ring[0]);
3408 if (status)
3409 goto err_irq;
3410
3411 netif_err(qdev, ifup, qdev->ndev,
3412 "Hooked intr 0, queue type RX_Q, with name %s.\n",
3413 intr_context->name);
3414 }
3415 intr_context->hooked = 1;
3416 }
3417 return status;
3418err_irq:
3419 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3420 qlge_free_irq(qdev);
3421 return status;
3422}
3423
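/* Load the RSS initialization control block (ricb).  The 1024-entry
 * indirection table spreads hash values round-robin across the RSS
 * rings; the 40-byte seed appears to be the commonly used Microsoft
 * RSS test key.
 */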
3424static int qlge_start_rss(struct qlge_adapter *qdev)
3425{
3426 static const u8 init_hash_seed[] = {
3427 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3428 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3429 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3430 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3431 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3432 };
3433 struct ricb *ricb = &qdev->ricb;
3434 int status = 0;
3435 int i;
3436 u8 *hash_id = (u8 *)ricb->hash_cq_id;
3437
3438 memset((void *)ricb, 0, sizeof(*ricb));
3439
3440 ricb->base_cq = RSS_L4K;
3441 ricb->flags =
3442 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3443 ricb->mask = cpu_to_le16((u16)(0x3ff));
3444
 /*
 * Fill out the Indirection Table.
 */
3448 for (i = 0; i < 1024; i++)
3449 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3450
3451 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3452 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3453
3454 status = qlge_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3455 if (status) {
3456 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3457 return status;
3458 }
3459 return status;
3460}
3461
3462static int qlge_clear_routing_entries(struct qlge_adapter *qdev)
3463{
3464 int i, status = 0;
3465
3466 status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3467 if (status)
3468 return status;
3469
3470 for (i = 0; i < 16; i++) {
3471 status = qlge_set_routing_reg(qdev, i, 0, 0);
3472 if (status) {
3473 netif_err(qdev, ifup, qdev->ndev,
3474 "Failed to init routing register for CAM packets.\n");
3475 break;
3476 }
3477 }
3478 qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
3479 return status;
3480}
3481
3482
3483static int qlge_route_initialize(struct qlge_adapter *qdev)
3484{
3485 int status = 0;
3486
3487
3488 status = qlge_clear_routing_entries(qdev);
3489 if (status)
3490 return status;
3491
3492 status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3493 if (status)
3494 return status;
3495
3496 status = qlge_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3497 RT_IDX_IP_CSUM_ERR, 1);
3498 if (status) {
3499 netif_err(qdev, ifup, qdev->ndev,
3500 "Failed to init routing register for IP CSUM error packets.\n");
3501 goto exit;
3502 }
3503 status = qlge_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3504 RT_IDX_TU_CSUM_ERR, 1);
3505 if (status) {
3506 netif_err(qdev, ifup, qdev->ndev,
3507 "Failed to init routing register for TCP/UDP CSUM error packets.\n");
3508 goto exit;
3509 }
3510 status = qlge_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3511 if (status) {
3512 netif_err(qdev, ifup, qdev->ndev,
3513 "Failed to init routing register for broadcast packets.\n");
3514 goto exit;
3515 }
3516
 /* Only route packets through the RSS hash when more than one
 * inbound queue is in use.
 */
3519 if (qdev->rss_ring_count > 1) {
3520 status = qlge_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3521 RT_IDX_RSS_MATCH, 1);
3522 if (status) {
3523 netif_err(qdev, ifup, qdev->ndev,
3524 "Failed to init routing register for MATCH RSS packets.\n");
3525 goto exit;
3526 }
3527 }
3528
3529 status = qlge_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3530 RT_IDX_CAM_HIT, 1);
3531 if (status)
3532 netif_err(qdev, ifup, qdev->ndev,
3533 "Failed to init routing register for CAM packets.\n");
3534exit:
3535 qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
3536 return status;
3537}
3538
3539int qlge_cam_route_initialize(struct qlge_adapter *qdev)
3540{
3541 int status, set;
3542
 /* Check whether the link is up and use that to decide if we are
 * setting or clearing the MAC address in the CAM.
 */
3547 set = qlge_read32(qdev, STS);
3548 set &= qdev->port_link_up;
3549 status = qlge_set_mac_addr(qdev, set);
3550 if (status) {
3551 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3552 return status;
3553 }
3554
3555 status = qlge_route_initialize(qdev);
3556 if (status)
3557 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3558
3559 return status;
3560}
3561
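/* Bring the chip to an operational state: program the global control
 * registers, load every RX/TX control block, configure RSS and the
 * CAM/routing tables, and finally enable NAPI on the RSS rings.
 */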
3562static int qlge_adapter_initialize(struct qlge_adapter *qdev)
3563{
3564 u32 value, mask;
3565 int i;
3566 int status = 0;
3567
 /*
 * Set up the System register to halt on errors.
 */
3571 value = SYS_EFE | SYS_FAE;
3572 mask = value << 16;
3573 qlge_write32(qdev, SYS, mask | value);
3574
3575
3576 value = NIC_RCV_CFG_DFQ;
3577 mask = NIC_RCV_CFG_DFQ_MASK;
3578 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3579 value |= NIC_RCV_CFG_RV;
3580 mask |= (NIC_RCV_CFG_RV << 16);
3581 }
3582 qlge_write32(qdev, NIC_RCV_CFG, (mask | value));
3583
3584
3585 qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3586
3587
3588 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3589 FSC_EC | FSC_VM_PAGE_4K;
3590 value |= SPLT_SETTING;
3591
3592
3593 mask = FSC_VM_PAGESIZE_MASK |
3594 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3595 qlge_write32(qdev, FSC, mask | value);
3596
3597 qlge_write32(qdev, SPLT_HDR, SPLT_LEN);
3598
 /* Set RX packet routing to use the port/pci function on which
 * the packet arrived, in addition to the usual frame routing.
 * This is helpful on bonding where both interfaces can have
 * the same MAC address.
 */
3604 qlge_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3605
 /* Reroute all packets to our interface.
 * They may have been routed to MPI firmware
 * due to WOL.
 */
3609 value = qlge_read32(qdev, MGMT_RCV_CFG);
3610 value &= ~MGMT_RCV_CFG_RM;
3611 mask = 0xffff0000;
3612
3613
3614 qlge_write32(qdev, MGMT_RCV_CFG, mask);
3615 qlge_write32(qdev, MGMT_RCV_CFG, mask | value);
3616
3617
3618 if (qdev->pdev->subsystem_device == 0x0068 ||
3619 qdev->pdev->subsystem_device == 0x0180)
3620 qdev->wol = WAKE_MAGIC;
3621
3622
3623 for (i = 0; i < qdev->rx_ring_count; i++) {
3624 status = qlge_start_rx_ring(qdev, &qdev->rx_ring[i]);
3625 if (status) {
3626 netif_err(qdev, ifup, qdev->ndev,
3627 "Failed to start rx ring[%d].\n", i);
3628 return status;
3629 }
3630 }
3631
 /* If there is more than one inbound completion queue
 * then download a RICB to configure RSS.
 */
3635 if (qdev->rss_ring_count > 1) {
3636 status = qlge_start_rss(qdev);
3637 if (status) {
3638 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3639 return status;
3640 }
3641 }
3642
3643
3644 for (i = 0; i < qdev->tx_ring_count; i++) {
3645 status = qlge_start_tx_ring(qdev, &qdev->tx_ring[i]);
3646 if (status) {
3647 netif_err(qdev, ifup, qdev->ndev,
3648 "Failed to start tx ring[%d].\n", i);
3649 return status;
3650 }
3651 }
3652
3653
3654 status = qdev->nic_ops->port_initialize(qdev);
3655 if (status)
3656 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3657
3658
3659 status = qlge_cam_route_initialize(qdev);
3660 if (status) {
3661 netif_err(qdev, ifup, qdev->ndev,
3662 "Failed to init CAM/Routing tables.\n");
3663 return status;
3664 }
3665
3666
3667 for (i = 0; i < qdev->rss_ring_count; i++)
3668 napi_enable(&qdev->rx_ring[i].napi);
3669
3670 return status;
3671}
3672
3673
3674static int qlge_adapter_reset(struct qlge_adapter *qdev)
3675{
3676 u32 value;
3677 int status = 0;
3678 unsigned long end_jiffies;
3679
3680
3681 status = qlge_clear_routing_entries(qdev);
3682 if (status) {
3683 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3684 return status;
3685 }
3686
 /* If the recovery flag is set, skip the mailbox commands and just
 * clear the flag; otherwise follow the normal reset process.
 */
3690 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3691
3692 qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3693
3694
3695 qlge_wait_fifo_empty(qdev);
3696 } else {
3697 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3698 }
3699
3700 qlge_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3701
3702 end_jiffies = jiffies + usecs_to_jiffies(30);
3703 do {
3704 value = qlge_read32(qdev, RST_FO);
3705 if ((value & RST_FO_FR) == 0)
3706 break;
3707 cpu_relax();
3708 } while (time_before(jiffies, end_jiffies));
3709
3710 if (value & RST_FO_FR) {
3711 netif_err(qdev, ifdown, qdev->ndev,
3712 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3713 status = -ETIMEDOUT;
3714 }
3715
3716
3717 qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3718 return status;
3719}
3720
3721static void qlge_display_dev_info(struct net_device *ndev)
3722{
3723 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
3724
3725 netif_info(qdev, probe, qdev->ndev,
3726 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, XG Roll = %d, XG Rev = %d.\n",
3727 qdev->func,
3728 qdev->port,
3729 qdev->chip_rev_id & 0x0000000f,
3730 qdev->chip_rev_id >> 4 & 0x0000000f,
3731 qdev->chip_rev_id >> 8 & 0x0000000f,
3732 qdev->chip_rev_id >> 12 & 0x0000000f);
3733 netif_info(qdev, probe, qdev->ndev,
3734 "MAC address %pM\n", ndev->dev_addr);
3735}
3736
3737static int qlge_wol(struct qlge_adapter *qdev)
3738{
3739 int status = 0;
3740 u32 wol = MB_WOL_DISABLE;
3741
 /* The CAM is still intact after a reset, but if we
 * are doing WOL, then we may need to program the
 * routing regs.  We would also need to issue the mailbox
 * commands to instruct the MPI what to do per the ethtool
 * settings.
 */
3749 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3750 WAKE_MCAST | WAKE_BCAST)) {
3751 netif_err(qdev, ifdown, qdev->ndev,
3752 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3753 qdev->wol);
3754 return -EINVAL;
3755 }
3756
3757 if (qdev->wol & WAKE_MAGIC) {
3758 status = qlge_mb_wol_set_magic(qdev, 1);
3759 if (status) {
3760 netif_err(qdev, ifdown, qdev->ndev,
3761 "Failed to set magic packet on %s.\n",
3762 qdev->ndev->name);
3763 return status;
3764 }
3765 netif_info(qdev, drv, qdev->ndev,
3766 "Enabled magic packet successfully on %s.\n",
3767 qdev->ndev->name);
3768
3769 wol |= MB_WOL_MAGIC_PKT;
3770 }
3771
3772 if (qdev->wol) {
3773 wol |= MB_WOL_MODE_ON;
3774 status = qlge_mb_wol_mode(qdev, wol);
3775 netif_err(qdev, drv, qdev->ndev,
3776 "WOL %s (wol code 0x%x) on %s\n",
3777 (status == 0) ? "Successfully set" : "Failed",
3778 wol, qdev->ndev->name);
3779 }
3780
3781 return status;
3782}
3783
3784static void qlge_cancel_all_work_sync(struct qlge_adapter *qdev)
3785{
 /* Don't kill the reset worker thread if we
 * are in the process of recovery.
 */
3789 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3790 cancel_delayed_work_sync(&qdev->asic_reset_work);
3791 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3792 cancel_delayed_work_sync(&qdev->mpi_work);
3793 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3794 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3795}
3796
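/* Quiesce the interface: stop NAPI and interrupts, reclaim any SKBs
 * still on the TX rings, reset the chip and free the RX buffers.
 * Resources allocated by qlge_get_adapter_resources() are kept.
 */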
3797static int qlge_adapter_down(struct qlge_adapter *qdev)
3798{
3799 int i, status = 0;
3800
3801 qlge_link_off(qdev);
3802
3803 qlge_cancel_all_work_sync(qdev);
3804
3805 for (i = 0; i < qdev->rss_ring_count; i++)
3806 napi_disable(&qdev->rx_ring[i].napi);
3807
3808 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3809
3810 qlge_disable_interrupts(qdev);
3811
3812 qlge_tx_ring_clean(qdev);
3813
3814
3815 for (i = 0; i < qdev->rss_ring_count; i++)
3816 netif_napi_del(&qdev->rx_ring[i].napi);
3817
3818 status = qlge_adapter_reset(qdev);
3819 if (status)
3820 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3821 qdev->func);
3822 qlge_free_rx_buffers(qdev);
3823
3824 return status;
3825}
3826
3827static int qlge_adapter_up(struct qlge_adapter *qdev)
3828{
3829 int err = 0;
3830
3831 err = qlge_adapter_initialize(qdev);
3832 if (err) {
3833 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3834 goto err_init;
3835 }
3836 set_bit(QL_ADAPTER_UP, &qdev->flags);
3837 qlge_alloc_rx_buffers(qdev);
3838
 /* If the port is initialized and the
 * link is up then turn on the carrier.
 */
3841 if ((qlge_read32(qdev, STS) & qdev->port_init) &&
3842 (qlge_read32(qdev, STS) & qdev->port_link_up))
3843 qlge_link_on(qdev);
3844
3845 clear_bit(QL_ALLMULTI, &qdev->flags);
3846 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3847 qlge_set_multicast_list(qdev->ndev);
3848
3849
3850 qlge_restore_vlan(qdev);
3851
3852 qlge_enable_interrupts(qdev);
3853 qlge_enable_all_completion_interrupts(qdev);
3854 netif_tx_start_all_queues(qdev->ndev);
3855
3856 return 0;
3857err_init:
3858 qlge_adapter_reset(qdev);
3859 return err;
3860}
3861
3862static void qlge_release_adapter_resources(struct qlge_adapter *qdev)
3863{
3864 qlge_free_mem_resources(qdev);
3865 qlge_free_irq(qdev);
3866}
3867
3868static int qlge_get_adapter_resources(struct qlge_adapter *qdev)
3869{
3870 if (qlge_alloc_mem_resources(qdev)) {
3871 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3872 return -ENOMEM;
3873 }
3874 return qlge_request_irq(qdev);
3875}
3876
3877static int qlge_close(struct net_device *ndev)
3878{
3879 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
3880 int i;
3881
 /* If we hit pci_channel_io_perm_failure
 * failure condition, then we already
 * brought the adapter down.
 */
3886 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3887 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
3888 clear_bit(QL_EEH_FATAL, &qdev->flags);
3889 return 0;
3890 }
3891
 /*
 * Wait for the device to recover from a reset.
 * (Rarely happens, but possible.)
 */
3896 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3897 msleep(1);
3898
3899
3900 for (i = 0; i < qdev->rss_ring_count; i++)
3901 cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work);
3902
3903 qlge_adapter_down(qdev);
3904 qlge_release_adapter_resources(qdev);
3905 return 0;
3906}
3907
3908static void qlge_set_lb_size(struct qlge_adapter *qdev)
3909{
3910 if (qdev->ndev->mtu <= 1500)
3911 qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE;
3912 else
3913 qdev->lbq_buf_size = LARGE_BUFFER_MAX_SIZE;
3914 qdev->lbq_buf_order = get_order(qdev->lbq_buf_size);
3915}
3916
3917static int qlge_configure_rings(struct qlge_adapter *qdev)
3918{
3919 int i;
3920 struct rx_ring *rx_ring;
3921 struct tx_ring *tx_ring;
3922 int cpu_cnt = min_t(int, MAX_CPUS, num_online_cpus());
3923
 /* In a perfect world we have one RSS ring for each CPU
 * and each has its own vector.  To do that we ask for
 * cpu_cnt vectors.  qlge_enable_msix() will adjust the
 * vector count to what we actually get, and we then
 * allocate an RSS ring for each vector.  One TX ring and
 * its outbound completion ring are created per CPU.
 */
3931 qdev->intr_count = cpu_cnt;
3932 qlge_enable_msix(qdev);
3933
3934 qdev->rss_ring_count = qdev->intr_count;
3935 qdev->tx_ring_count = cpu_cnt;
3936 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
3937
3938 for (i = 0; i < qdev->tx_ring_count; i++) {
3939 tx_ring = &qdev->tx_ring[i];
3940 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3941 tx_ring->qdev = qdev;
3942 tx_ring->wq_id = i;
3943 tx_ring->wq_len = qdev->tx_ring_size;
3944 tx_ring->wq_size =
3945 tx_ring->wq_len * sizeof(struct qlge_ob_mac_iocb_req);
3946
 /*
 * The completion queue IDs for the tx rings start
 * immediately after the rss rings.
 */
3951 tx_ring->cq_id = qdev->rss_ring_count + i;
3952 }
3953
3954 for (i = 0; i < qdev->rx_ring_count; i++) {
3955 rx_ring = &qdev->rx_ring[i];
3956 memset((void *)rx_ring, 0, sizeof(*rx_ring));
3957 rx_ring->qdev = qdev;
3958 rx_ring->cq_id = i;
3959 rx_ring->cpu = i % cpu_cnt;
3960 if (i < qdev->rss_ring_count) {
 /*
 * Inbound (RSS) queues.
 */
3964 rx_ring->cq_len = qdev->rx_ring_size;
3965 rx_ring->cq_size =
3966 rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb);
3967 rx_ring->lbq.type = QLGE_LB;
3968 rx_ring->sbq.type = QLGE_SB;
3969 INIT_DELAYED_WORK(&rx_ring->refill_work,
3970 &qlge_slow_refill);
3971 } else {
 /*
 * Outbound queue handles outbound completions only.
 */
 /* outbound cq is same size as tx_ring it services. */
3976 rx_ring->cq_len = qdev->tx_ring_size;
3977 rx_ring->cq_size =
3978 rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb);
3979 }
3980 }
3981 return 0;
3982}
3983
3984static int qlge_open(struct net_device *ndev)
3985{
3986 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
3987 int err = 0;
3988
3989 err = qlge_adapter_reset(qdev);
3990 if (err)
3991 return err;
3992
3993 qlge_set_lb_size(qdev);
3994 err = qlge_configure_rings(qdev);
3995 if (err)
3996 return err;
3997
3998 err = qlge_get_adapter_resources(qdev);
3999 if (err)
4000 goto error_up;
4001
4002 err = qlge_adapter_up(qdev);
4003 if (err)
4004 goto error_up;
4005
4006 return err;
4007
4008error_up:
4009 qlge_release_adapter_resources(qdev);
4010 return err;
4011}
4012
4013static int qlge_change_rx_buffers(struct qlge_adapter *qdev)
4014{
4015 int status;
4016
4017
4018 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4019 int i = 4;
4020
4021 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4022 netif_err(qdev, ifup, qdev->ndev,
4023 "Waiting for adapter UP...\n");
4024 ssleep(1);
4025 }
4026
4027 if (!i) {
4028 netif_err(qdev, ifup, qdev->ndev,
4029 "Timed out waiting for adapter UP\n");
4030 return -ETIMEDOUT;
4031 }
4032 }
4033
4034 status = qlge_adapter_down(qdev);
4035 if (status)
4036 goto error;
4037
4038 qlge_set_lb_size(qdev);
4039
4040 status = qlge_adapter_up(qdev);
4041 if (status)
4042 goto error;
4043
4044 return status;
4045error:
4046 netif_alert(qdev, ifup, qdev->ndev,
4047 "Driver up/down cycle failed, closing device.\n");
4048 set_bit(QL_ADAPTER_UP, &qdev->flags);
4049 dev_close(qdev->ndev);
4050 return status;
4051}
4052
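/* Only the standard (1500) and jumbo (9000) MTUs are supported.  A
 * change requires new large-buffer sizing, so a running interface is
 * bounced through qlge_change_rx_buffers().
 */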
4053static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4054{
4055 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4056 int status;
4057
4058 if (ndev->mtu == 1500 && new_mtu == 9000)
4059 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4060 else if (ndev->mtu == 9000 && new_mtu == 1500)
4061 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4062 else
4063 return -EINVAL;
4064
4065 queue_delayed_work(qdev->workqueue,
4066 &qdev->mpi_port_cfg_work, 3 * HZ);
4067
4068 ndev->mtu = new_mtu;
4069
4070 if (!netif_running(qdev->ndev))
4071 return 0;
4072
4073 status = qlge_change_rx_buffers(qdev);
4074 if (status) {
4075 netif_err(qdev, ifup, qdev->ndev,
4076 "Changing MTU failed.\n");
4077 }
4078
4079 return status;
4080}
4081
static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
4084{
4085 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4086 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4087 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4088 unsigned long pkts, mcast, dropped, errors, bytes;
4089 int i;
4090
4091
4092 pkts = mcast = dropped = errors = bytes = 0;
4093 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4094 pkts += rx_ring->rx_packets;
4095 bytes += rx_ring->rx_bytes;
4096 dropped += rx_ring->rx_dropped;
4097 errors += rx_ring->rx_errors;
4098 mcast += rx_ring->rx_multicast;
4099 }
4100 ndev->stats.rx_packets = pkts;
4101 ndev->stats.rx_bytes = bytes;
4102 ndev->stats.rx_dropped = dropped;
4103 ndev->stats.rx_errors = errors;
4104 ndev->stats.multicast = mcast;
4105
4106
4107 pkts = errors = bytes = 0;
4108 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4109 pkts += tx_ring->tx_packets;
4110 bytes += tx_ring->tx_bytes;
4111 errors += tx_ring->tx_errors;
4112 }
4113 ndev->stats.tx_packets = pkts;
4114 ndev->stats.tx_bytes = bytes;
4115 ndev->stats.tx_errors = errors;
4116 return &ndev->stats;
4117}
4118
4119static void qlge_set_multicast_list(struct net_device *ndev)
4120{
4121 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4122 struct netdev_hw_addr *ha;
4123 int i, status;
4124
4125 status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4126 if (status)
4127 return;
4128
 /*
 * Set or clear promiscuous mode if a
 * transition is taking place.
 */
4132 if (ndev->flags & IFF_PROMISC) {
4133 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4134 if (qlge_set_routing_reg
4135 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4136 netif_err(qdev, hw, qdev->ndev,
4137 "Failed to set promiscuous mode.\n");
4138 } else {
4139 set_bit(QL_PROMISCUOUS, &qdev->flags);
4140 }
4141 }
4142 } else {
4143 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4144 if (qlge_set_routing_reg
4145 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4146 netif_err(qdev, hw, qdev->ndev,
4147 "Failed to clear promiscuous mode.\n");
4148 } else {
4149 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4150 }
4151 }
4152 }
4153
 /*
 * Set or clear all multicast mode if a
 * transition is taking place.
 */
4158 if ((ndev->flags & IFF_ALLMULTI) ||
4159 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4160 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4161 if (qlge_set_routing_reg
4162 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4163 netif_err(qdev, hw, qdev->ndev,
4164 "Failed to set all-multi mode.\n");
4165 } else {
4166 set_bit(QL_ALLMULTI, &qdev->flags);
4167 }
4168 }
4169 } else {
4170 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4171 if (qlge_set_routing_reg
4172 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4173 netif_err(qdev, hw, qdev->ndev,
4174 "Failed to clear all-multi mode.\n");
4175 } else {
4176 clear_bit(QL_ALLMULTI, &qdev->flags);
4177 }
4178 }
4179 }
4180
4181 if (!netdev_mc_empty(ndev)) {
4182 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4183 if (status)
4184 goto exit;
4185 i = 0;
4186 netdev_for_each_mc_addr(ha, ndev) {
4187 if (qlge_set_mac_addr_reg(qdev, (u8 *)ha->addr,
4188 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4189 netif_err(qdev, hw, qdev->ndev,
4190 "Failed to loadmulticast address.\n");
4191 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4192 goto exit;
4193 }
4194 i++;
4195 }
4196 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4197 if (qlge_set_routing_reg
4198 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4199 netif_err(qdev, hw, qdev->ndev,
4200 "Failed to set multicast match mode.\n");
4201 } else {
4202 set_bit(QL_ALLMULTI, &qdev->flags);
4203 }
4204 }
4205exit:
4206 qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
4207}
4208
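/* ndo_set_mac_address handler: validate and remember the new address,
 * then program it into the CAM under the MAC address semaphore.
 */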
4209static int qlge_set_mac_address(struct net_device *ndev, void *p)
4210{
4211 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4212 struct sockaddr *addr = p;
4213 int status;
4214
4215 if (!is_valid_ether_addr(addr->sa_data))
4216 return -EADDRNOTAVAIL;
4217 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4218
4219 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4220
4221 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4222 if (status)
4223 return status;
4224 status = qlge_set_mac_addr_reg(qdev, (u8 *)ndev->dev_addr,
4225 MAC_ADDR_TYPE_CAM_MAC,
4226 qdev->func * MAX_CQ);
4227 if (status)
4228 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4229 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4230 return status;
4231}
4232
4233static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
4234{
4235 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4236
4237 qlge_queue_asic_error(qdev);
4238}
4239
4240static void qlge_asic_reset_work(struct work_struct *work)
4241{
4242 struct qlge_adapter *qdev =
4243 container_of(work, struct qlge_adapter, asic_reset_work.work);
4244 int status;
4245
4246 rtnl_lock();
4247 status = qlge_adapter_down(qdev);
4248 if (status)
4249 goto error;
4250
4251 status = qlge_adapter_up(qdev);
4252 if (status)
4253 goto error;
4254
4255
4256 clear_bit(QL_ALLMULTI, &qdev->flags);
4257 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4258 qlge_set_multicast_list(qdev->ndev);
4259
4260 rtnl_unlock();
4261 return;
4262error:
4263 netif_alert(qdev, ifup, qdev->ndev,
4264 "Driver up/down cycle failed, closing device\n");
4265
4266 set_bit(QL_ADAPTER_UP, &qdev->flags);
4267 dev_close(qdev->ndev);
4268 rtnl_unlock();
4269}
4270
4271static const struct nic_operations qla8012_nic_ops = {
4272 .get_flash = qlge_get_8012_flash_params,
4273 .port_initialize = qlge_8012_port_initialize,
4274};
4275
4276static const struct nic_operations qla8000_nic_ops = {
4277 .get_flash = qlge_get_8000_flash_params,
4278 .port_initialize = qlge_8000_port_initialize,
4279};
4280
/* Read the MPI test register to find which NIC function numbers this
 * chip exposes, and record the one that is not ours as the alternate
 * function.  The result is later used to derive the port number.
 */
4288static int qlge_get_alt_pcie_func(struct qlge_adapter *qdev)
4289{
4290 int status = 0;
4291 u32 temp;
4292 u32 nic_func1, nic_func2;
4293
4294 status = qlge_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4295 &temp);
4296 if (status)
4297 return status;
4298
4299 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4300 MPI_TEST_NIC_FUNC_MASK);
4301 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4302 MPI_TEST_NIC_FUNC_MASK);
4303
4304 if (qdev->func == nic_func1)
4305 qdev->alt_func = nic_func2;
4306 else if (qdev->func == nic_func2)
4307 qdev->alt_func = nic_func1;
4308 else
4309 status = -EIO;
4310
4311 return status;
4312}
4313
4314static int qlge_get_board_info(struct qlge_adapter *qdev)
4315{
4316 int status;
4317
4318 qdev->func =
4319 (qlge_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4320 if (qdev->func > 3)
4321 return -EIO;
4322
4323 status = qlge_get_alt_pcie_func(qdev);
4324 if (status)
4325 return status;
4326
4327 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4328 if (qdev->port) {
4329 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4330 qdev->port_link_up = STS_PL1;
4331 qdev->port_init = STS_PI1;
4332 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4333 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4334 } else {
4335 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4336 qdev->port_link_up = STS_PL0;
4337 qdev->port_init = STS_PI0;
4338 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4339 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4340 }
4341 qdev->chip_rev_id = qlge_read32(qdev, REV_ID);
4342 qdev->device_id = qdev->pdev->device;
4343 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4344 qdev->nic_ops = &qla8012_nic_ops;
4345 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4346 qdev->nic_ops = &qla8000_nic_ops;
4347 return status;
4348}
4349
4350static void qlge_release_all(struct pci_dev *pdev)
4351{
4352 struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4353
4354 if (qdev->workqueue) {
4355 destroy_workqueue(qdev->workqueue);
4356 qdev->workqueue = NULL;
4357 }
4358
4359 if (qdev->reg_base)
4360 iounmap(qdev->reg_base);
4361 if (qdev->doorbell_area)
4362 iounmap(qdev->doorbell_area);
4363 vfree(qdev->mpi_coredump);
4364 pci_release_regions(pdev);
4365}
4366
4367static int qlge_init_device(struct pci_dev *pdev, struct qlge_adapter *qdev,
4368 int cards_found)
4369{
4370 struct net_device *ndev = qdev->ndev;
4371 int err = 0;
4372
4373 err = pci_enable_device(pdev);
4374 if (err) {
4375 dev_err(&pdev->dev, "PCI device enable failed.\n");
4376 return err;
4377 }
4378
4379 qdev->pdev = pdev;
4380 pci_set_drvdata(pdev, qdev);
4381
4382
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_disable_pci;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_disable_pci;
	}

	pci_set_master(pdev);
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (!err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_release_pci;
	}

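	/* Ask for a fundamental reset during EEH recovery and save config
	 * space so qlge_io_slot_reset() can restore it.
	 */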
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
		ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_release_pci;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
		ioremap(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_iounmap_base;
	}

	err = qlge_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_iounmap_doorbell;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct qlge_mpi_coredump));
		if (!qdev->mpi_coredump) {
			err = -ENOMEM;
			goto err_iounmap_doorbell;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}

	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_free_mpi_coredump;
	}

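	/* Keep a local copy of the current MAC address. */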
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

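	/* Default TX/RX ring sizes. */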
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

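	/* Default interrupt coalescing parameters. */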
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

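	/* Single-threaded, ordered workqueue for the reset, MPI and IDC
	 * work items initialized below.
	 */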
	qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
						  ndev->name);
	if (!qdev->workqueue) {
		err = -ENOMEM;
		goto err_free_mpi_coredump;
	}

	INIT_DELAYED_WORK(&qdev->asic_reset_work, qlge_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, qlge_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, qlge_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, qlge_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, qlge_mpi_idc_work);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;

err_free_mpi_coredump:
	vfree(qdev->mpi_coredump);
err_iounmap_doorbell:
	iounmap(qdev->doorbell_area);
err_iounmap_base:
	iounmap(qdev->reg_base);
err_release_pci:
	pci_release_regions(pdev);
err_disable_pci:
	pci_disable_device(pdev);

	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_rx_mode	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};

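/* Deferrable 5 second timer: the periodic STS register read lets a dead PCI
 * bus be detected (EEH) even while the interface is otherwise idle.
 */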
static void qlge_timer(struct timer_list *t)
{
	struct qlge_adapter *qdev = from_timer(qdev, t, timer);
	u32 var = 0;

	var = qlge_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5 * HZ));
}

static const struct devlink_ops qlge_devlink_ops;

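/* PCI probe entry point: allocate the devlink instance and net_device, run
 * qlge_init_device(), set up net_device features and ops, then register the
 * netdev, the devlink instance and the devlink health reporters.
 */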
static int qlge_probe(struct pci_dev *pdev,
		      const struct pci_device_id *pci_entry)
{
	struct qlge_netdev_priv *ndev_priv;
	struct qlge_adapter *qdev = NULL;
	struct net_device *ndev = NULL;
	struct devlink *devlink;
	static int cards_found;
	int err;

	devlink = devlink_alloc(&qlge_devlink_ops, sizeof(struct qlge_adapter),
				&pdev->dev);
	if (!devlink)
		return -ENOMEM;

	qdev = devlink_priv(devlink);

	ndev = alloc_etherdev_mq(sizeof(struct qlge_netdev_priv),
				 min(MAX_CPUS,
				     netif_get_num_default_rss_queues()));
	if (!ndev) {
		err = -ENOMEM;
		goto devlink_free;
	}

	ndev_priv = netdev_priv(ndev);
	ndev_priv->qdev = qdev;
	ndev_priv->ndev = ndev;
	qdev->ndev = ndev;
	err = qlge_init_device(pdev, qdev, cards_found);
	if (err < 0)
		goto netdev_free;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM |
		NETIF_F_TSO |
		NETIF_F_TSO_ECN |
		NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_HW_VLAN_CTAG_FILTER |
		NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features;
	ndev->vlan_features = ndev->hw_features;

	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
				 NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX);

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	ndev->ethtool_ops = &qlge_ethtool_ops;
	ndev->watchdog_timeo = 10 * HZ;

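	/* MTU bounds: the hardware runs with either a standard 1500-byte or
	 * a 9000-byte jumbo MTU; qlge_change_mtu() restricts which values
	 * are actually accepted.
	 */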
	ndev->min_mtu = ETH_DATA_LEN;
	ndev->max_mtu = 9000;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		qlge_release_all(pdev);
		pci_disable_device(pdev);
		goto netdev_free;
	}

	err = devlink_register(devlink);
	if (err)
		goto netdev_unregister;

	err = qlge_health_create_reporters(qdev);
	if (err)
		goto devlink_unregister;

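	/* Arm the EEH watchdog timer (see qlge_timer()). */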
	timer_setup(&qdev->timer, qlge_timer, TIMER_DEFERRABLE);
	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	qlge_link_off(qdev);
	qlge_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;

devlink_unregister:
	devlink_unregister(devlink);
netdev_unregister:
	unregister_netdev(ndev);
	qlge_release_all(pdev);
	pci_disable_device(pdev);
netdev_free:
	free_netdev(ndev);
devlink_free:
	devlink_free(devlink);

	return err;
}

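/* Thin wrappers exported to the ethtool loopback self-test. */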
netdev_tx_t qlge_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int qlge_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return qlge_clean_inbound_rx_ring(rx_ring, budget);
}

static void qlge_remove(struct pci_dev *pdev)
{
	struct qlge_adapter *qdev = pci_get_drvdata(pdev);
	struct net_device *ndev = qdev->ndev;
	struct devlink *devlink = priv_to_devlink(qdev);

	del_timer_sync(&qdev->timer);
	qlge_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	qlge_release_all(pdev);
	pci_disable_device(pdev);
	devlink_health_reporter_destroy(qdev->reporter);
	devlink_unregister(devlink);
	devlink_free(devlink);
	free_netdev(ndev);
}

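/* Tear down the data path without touching the (possibly dead) hardware;
 * used by the PCI error handlers below.
 */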
static void qlge_eeh_close(struct net_device *ndev)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
	int i;

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	qlge_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	qlge_tx_ring_clean(qdev);
	qlge_free_rx_buffers(qdev);
	qlge_release_adapter_resources(qdev);
}

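/* Called by the PCI subsystem when a bus error involving this device is
 * detected.
 */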
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct qlge_adapter *qdev = pci_get_drvdata(pdev);
	struct net_device *ndev = qdev->ndev;

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		del_timer_sync(&qdev->timer);
		if (netif_running(ndev))
			qlge_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		del_timer_sync(&qdev->timer);
		qlge_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

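	/* For any other channel state, request a slot reset. */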
	return PCI_ERS_RESULT_NEED_RESET;
}

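/* Called after the PCI bus has been reset: restore config space, re-enable
 * the device and reset the chip from scratch before traffic is restarted by
 * qlge_io_resume().
 */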
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct qlge_adapter *qdev = pci_get_drvdata(pdev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (qlge_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

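/* Final stage of error recovery: reopen the interface if it was running
 * before the error and restart the EEH watchdog timer.
 */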
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct qlge_adapter *qdev = pci_get_drvdata(pdev);
	struct net_device *ndev = qdev->ndev;
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	netif_device_attach(ndev);
}

static const struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

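/* PM suspend: detach the netdev, stop the watchdog timer, take the adapter
 * down and set up wake-on-LAN.
 */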
static int __maybe_unused qlge_suspend(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct qlge_adapter *qdev;
	struct net_device *ndev;
	int err;

	qdev = pci_get_drvdata(pdev);
	ndev = qdev->ndev;
	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = qlge_adapter_down(qdev);
		if (err)
			return err;
	}

	qlge_wol(qdev);

	return 0;
}

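/* PM resume: restore bus mastering, disable wake-up and bring the adapter
 * back up if the interface was running when we suspended.
 */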
static int __maybe_unused qlge_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct qlge_adapter *qdev;
	struct net_device *ndev;
	int err;

	qdev = pci_get_drvdata(pdev);
	ndev = qdev->ndev;

	pci_set_master(pdev);

	device_wakeup_disable(dev_d);

	if (netif_running(ndev)) {
		err = qlge_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	netif_device_attach(ndev);

	return 0;
}

static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(&pdev->dev);
}

static SIMPLE_DEV_PM_OPS(qlge_pm_ops, qlge_suspend, qlge_resume);

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = qlge_remove,
	.driver.pm = &qlge_pm_ops,
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

module_pci_driver(qlge_driver);