1
2
3
4
5
6
7
8#include <linux/kernel.h>
9#include <linux/bitops.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/pagemap.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/dmapool.h>
19#include <linux/mempool.h>
20#include <linux/spinlock.h>
21#include <linux/kthread.h>
22#include <linux/interrupt.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/in.h>
26#include <linux/ip.h>
27#include <linux/ipv6.h>
28#include <net/ipv6.h>
29#include <linux/tcp.h>
30#include <linux/udp.h>
31#include <linux/if_arp.h>
32#include <linux/if_ether.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/ethtool.h>
36#include <linux/if_vlan.h>
37#include <linux/skbuff.h>
38#include <linux/delay.h>
39#include <linux/mm.h>
40#include <linux/vmalloc.h>
41#include <linux/prefetch.h>
42#include <net/ip6_checksum.h>
43
44#include "qlge.h"
45#include "qlge_devlink.h"
46
47char qlge_driver_name[] = DRV_NAME;
48const char qlge_driver_version[] = DRV_VERSION;
49
50MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51MODULE_DESCRIPTION(DRV_STRING " ");
52MODULE_LICENSE("GPL");
53MODULE_VERSION(DRV_VERSION);
54
55static const u32 default_msg =
56 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 NETIF_MSG_IFDOWN |
58 NETIF_MSG_IFUP |
59 NETIF_MSG_RX_ERR |
60 NETIF_MSG_TX_ERR |
61 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
62
63static int debug = -1;
64module_param(debug, int, 0664);
65MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
66
67#define MSIX_IRQ 0
68#define MSI_IRQ 1
69#define LEG_IRQ 2
70static int qlge_irq_type = MSIX_IRQ;
71module_param(qlge_irq_type, int, 0664);
72MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
73
74static int qlge_mpi_coredump;
75module_param(qlge_mpi_coredump, int, 0);
76MODULE_PARM_DESC(qlge_mpi_coredump,
77 "Option to enable MPI firmware dump. Default is OFF - Do Not allocate memory. ");
78
79static int qlge_force_coredump;
80module_param(qlge_force_coredump, int, 0);
81MODULE_PARM_DESC(qlge_force_coredump,
82 "Option to allow force of firmware core dump. Default is OFF - Do not allow.");
83
84static const struct pci_device_id qlge_pci_tbl[] = {
85 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
86 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
87
88 {0,}
89};
90
91MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
92
93static int qlge_wol(struct qlge_adapter *);
94static void qlge_set_multicast_list(struct net_device *);
95static int qlge_adapter_down(struct qlge_adapter *);
96static int qlge_adapter_up(struct qlge_adapter *);
97
98
99
100
101
102static int qlge_sem_trylock(struct qlge_adapter *qdev, u32 sem_mask)
103{
104 u32 sem_bits = 0;
105
106 switch (sem_mask) {
107 case SEM_XGMAC0_MASK:
108 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
109 break;
110 case SEM_XGMAC1_MASK:
111 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
112 break;
113 case SEM_ICB_MASK:
114 sem_bits = SEM_SET << SEM_ICB_SHIFT;
115 break;
116 case SEM_MAC_ADDR_MASK:
117 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
118 break;
119 case SEM_FLASH_MASK:
120 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
121 break;
122 case SEM_PROBE_MASK:
123 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
124 break;
125 case SEM_RT_IDX_MASK:
126 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
127 break;
128 case SEM_PROC_REG_MASK:
129 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
130 break;
131 default:
132 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
133 return -EINVAL;
134 }
135
136 qlge_write32(qdev, SEM, sem_bits | sem_mask);
137 return !(qlge_read32(qdev, SEM) & sem_bits);
138}
139
140int qlge_sem_spinlock(struct qlge_adapter *qdev, u32 sem_mask)
141{
142 unsigned int wait_count = 30;
143
144 do {
145 if (!qlge_sem_trylock(qdev, sem_mask))
146 return 0;
147 udelay(100);
148 } while (--wait_count);
149 return -ETIMEDOUT;
150}
151
152void qlge_sem_unlock(struct qlge_adapter *qdev, u32 sem_mask)
153{
154 qlge_write32(qdev, SEM, sem_mask);
155 qlge_read32(qdev, SEM);
156}
157
158
159
160
161
162
163int qlge_wait_reg_rdy(struct qlge_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
164{
165 u32 temp;
166 int count;
167
168 for (count = 0; count < UDELAY_COUNT; count++) {
169 temp = qlge_read32(qdev, reg);
170
171
172 if (temp & err_bit) {
173 netif_alert(qdev, probe, qdev->ndev,
174 "register 0x%.08x access error, value = 0x%.08x!.\n",
175 reg, temp);
176 return -EIO;
177 } else if (temp & bit) {
178 return 0;
179 }
180 udelay(UDELAY_DELAY);
181 }
182 netif_alert(qdev, probe, qdev->ndev,
183 "Timed out waiting for reg %x to come ready.\n", reg);
184 return -ETIMEDOUT;
185}
186
187
188
189
190static int qlge_wait_cfg(struct qlge_adapter *qdev, u32 bit)
191{
192 int count;
193 u32 temp;
194
195 for (count = 0; count < UDELAY_COUNT; count++) {
196 temp = qlge_read32(qdev, CFG);
197 if (temp & CFG_LE)
198 return -EIO;
199 if (!(temp & bit))
200 return 0;
201 udelay(UDELAY_DELAY);
202 }
203 return -ETIMEDOUT;
204}
205
206
207
208
209int qlge_write_cfg(struct qlge_adapter *qdev, void *ptr, int size, u32 bit,
210 u16 q_id)
211{
212 u64 map;
213 int status = 0;
214 int direction;
215 u32 mask;
216 u32 value;
217
218 if (bit & (CFG_LRQ | CFG_LR | CFG_LCQ))
219 direction = DMA_TO_DEVICE;
220 else
221 direction = DMA_FROM_DEVICE;
222
223 map = dma_map_single(&qdev->pdev->dev, ptr, size, direction);
224 if (dma_mapping_error(&qdev->pdev->dev, map)) {
225 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
226 return -ENOMEM;
227 }
228
229 status = qlge_sem_spinlock(qdev, SEM_ICB_MASK);
230 if (status)
231 goto lock_failed;
232
233 status = qlge_wait_cfg(qdev, bit);
234 if (status) {
235 netif_err(qdev, ifup, qdev->ndev,
236 "Timed out waiting for CFG to come ready.\n");
237 goto exit;
238 }
239
240 qlge_write32(qdev, ICB_L, (u32)map);
241 qlge_write32(qdev, ICB_H, (u32)(map >> 32));
242
243 mask = CFG_Q_MASK | (bit << 16);
244 value = bit | (q_id << CFG_Q_SHIFT);
245 qlge_write32(qdev, CFG, (mask | value));
246
247
248
249
250 status = qlge_wait_cfg(qdev, bit);
251exit:
252 qlge_sem_unlock(qdev, SEM_ICB_MASK);
253lock_failed:
254 dma_unmap_single(&qdev->pdev->dev, map, size, direction);
255 return status;
256}
257
258
259int qlge_get_mac_addr_reg(struct qlge_adapter *qdev, u32 type, u16 index,
260 u32 *value)
261{
262 u32 offset = 0;
263 int status;
264
265 switch (type) {
266 case MAC_ADDR_TYPE_MULTI_MAC:
267 case MAC_ADDR_TYPE_CAM_MAC: {
268 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
269 if (status)
270 break;
271 qlge_write32(qdev, MAC_ADDR_IDX,
272 (offset++) |
273 (index << MAC_ADDR_IDX_SHIFT) |
274 MAC_ADDR_ADR | MAC_ADDR_RS |
275 type);
276 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
277 if (status)
278 break;
279 *value++ = qlge_read32(qdev, MAC_ADDR_DATA);
280 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
281 if (status)
282 break;
283 qlge_write32(qdev, MAC_ADDR_IDX,
284 (offset++) |
285 (index << MAC_ADDR_IDX_SHIFT) |
286 MAC_ADDR_ADR | MAC_ADDR_RS |
287 type);
288 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
289 if (status)
290 break;
291 *value++ = qlge_read32(qdev, MAC_ADDR_DATA);
292 if (type == MAC_ADDR_TYPE_CAM_MAC) {
293 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX,
294 MAC_ADDR_MW, 0);
295 if (status)
296 break;
297 qlge_write32(qdev, MAC_ADDR_IDX,
298 (offset++) |
299 (index
300 << MAC_ADDR_IDX_SHIFT) |
301 MAC_ADDR_ADR |
302 MAC_ADDR_RS | type);
303 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX,
304 MAC_ADDR_MR, 0);
305 if (status)
306 break;
307 *value++ = qlge_read32(qdev, MAC_ADDR_DATA);
308 }
309 break;
310 }
311 case MAC_ADDR_TYPE_VLAN:
312 case MAC_ADDR_TYPE_MULTI_FLTR:
313 default:
314 netif_crit(qdev, ifup, qdev->ndev,
315 "Address type %d not yet supported.\n", type);
316 status = -EPERM;
317 }
318 return status;
319}
320
321
322
323
324static int qlge_set_mac_addr_reg(struct qlge_adapter *qdev, u8 *addr, u32 type,
325 u16 index)
326{
327 u32 offset = 0;
328 int status = 0;
329
330 switch (type) {
331 case MAC_ADDR_TYPE_MULTI_MAC: {
332 u32 upper = (addr[0] << 8) | addr[1];
333 u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
334 (addr[5]);
335
336 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
337 if (status)
338 break;
339 qlge_write32(qdev, MAC_ADDR_IDX,
340 (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
341 MAC_ADDR_E);
342 qlge_write32(qdev, MAC_ADDR_DATA, lower);
343 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
344 if (status)
345 break;
346 qlge_write32(qdev, MAC_ADDR_IDX,
347 (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
348 MAC_ADDR_E);
349
350 qlge_write32(qdev, MAC_ADDR_DATA, upper);
351 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
352 break;
353 }
354 case MAC_ADDR_TYPE_CAM_MAC: {
355 u32 cam_output;
356 u32 upper = (addr[0] << 8) | addr[1];
357 u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
358 (addr[5]);
359 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
360 if (status)
361 break;
362 qlge_write32(qdev, MAC_ADDR_IDX,
363 (offset++) |
364 (index << MAC_ADDR_IDX_SHIFT) |
365 type);
366 qlge_write32(qdev, MAC_ADDR_DATA, lower);
367 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
368 if (status)
369 break;
370 qlge_write32(qdev, MAC_ADDR_IDX,
371 (offset++) |
372 (index << MAC_ADDR_IDX_SHIFT) |
373 type);
374 qlge_write32(qdev, MAC_ADDR_DATA, upper);
375 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
376 if (status)
377 break;
378 qlge_write32(qdev, MAC_ADDR_IDX,
379 (offset) |
380 (index << MAC_ADDR_IDX_SHIFT) |
381 type);
382
383
384
385
386 cam_output = (CAM_OUT_ROUTE_NIC |
387 (qdev->func << CAM_OUT_FUNC_SHIFT) |
388 (0 << CAM_OUT_CQ_ID_SHIFT));
389 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
390 cam_output |= CAM_OUT_RV;
391
392 qlge_write32(qdev, MAC_ADDR_DATA, cam_output);
393 break;
394 }
395 case MAC_ADDR_TYPE_VLAN: {
396 u32 enable_bit = *((u32 *)&addr[0]);
397
398
399
400
401
402 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
403 if (status)
404 break;
405 qlge_write32(qdev, MAC_ADDR_IDX,
406 offset |
407 (index << MAC_ADDR_IDX_SHIFT) |
408 type |
409 enable_bit);
410 break;
411 }
412 case MAC_ADDR_TYPE_MULTI_FLTR:
413 default:
414 netif_crit(qdev, ifup, qdev->ndev,
415 "Address type %d not yet supported.\n", type);
416 status = -EPERM;
417 }
418 return status;
419}
420
421
422
423
424
425static int qlge_set_mac_addr(struct qlge_adapter *qdev, int set)
426{
427 int status;
428 char zero_mac_addr[ETH_ALEN];
429 char *addr;
430
431 if (set) {
432 addr = &qdev->current_mac_addr[0];
433 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
434 "Set Mac addr %pM\n", addr);
435 } else {
436 eth_zero_addr(zero_mac_addr);
437 addr = &zero_mac_addr[0];
438 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
439 "Clearing MAC address\n");
440 }
441 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
442 if (status)
443 return status;
444 status = qlge_set_mac_addr_reg(qdev, (u8 *)addr,
445 MAC_ADDR_TYPE_CAM_MAC,
446 qdev->func * MAX_CQ);
447 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
448 if (status)
449 netif_err(qdev, ifup, qdev->ndev,
450 "Failed to init mac address.\n");
451 return status;
452}
453
454void qlge_link_on(struct qlge_adapter *qdev)
455{
456 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
457 netif_carrier_on(qdev->ndev);
458 qlge_set_mac_addr(qdev, 1);
459}
460
461void qlge_link_off(struct qlge_adapter *qdev)
462{
463 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
464 netif_carrier_off(qdev->ndev);
465 qlge_set_mac_addr(qdev, 0);
466}
467
468
469
470
471int qlge_get_routing_reg(struct qlge_adapter *qdev, u32 index, u32 *value)
472{
473 int status = 0;
474
475 status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
476 if (status)
477 goto exit;
478
479 qlge_write32(qdev, RT_IDX,
480 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
481 status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
482 if (status)
483 goto exit;
484 *value = qlge_read32(qdev, RT_DATA);
485exit:
486 return status;
487}
488
489
490
491
492
493
494static int qlge_set_routing_reg(struct qlge_adapter *qdev, u32 index, u32 mask,
495 int enable)
496{
497 int status = -EINVAL;
498 u32 value = 0;
499
500 switch (mask) {
501 case RT_IDX_CAM_HIT:
502 {
503 value = RT_IDX_DST_CAM_Q |
504 RT_IDX_TYPE_NICQ |
505 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);
506 break;
507 }
508 case RT_IDX_VALID:
509 {
510 value = RT_IDX_DST_DFLT_Q |
511 RT_IDX_TYPE_NICQ |
512 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);
513 break;
514 }
515 case RT_IDX_ERR:
516 {
517 value = RT_IDX_DST_DFLT_Q |
518 RT_IDX_TYPE_NICQ |
519 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);
520 break;
521 }
522 case RT_IDX_IP_CSUM_ERR:
523 {
524 value = RT_IDX_DST_DFLT_Q |
525 RT_IDX_TYPE_NICQ |
526 (RT_IDX_IP_CSUM_ERR_SLOT <<
527 RT_IDX_IDX_SHIFT);
528 break;
529 }
530 case RT_IDX_TU_CSUM_ERR:
531 {
532 value = RT_IDX_DST_DFLT_Q |
533 RT_IDX_TYPE_NICQ |
534 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
535 RT_IDX_IDX_SHIFT);
536 break;
537 }
538 case RT_IDX_BCAST:
539 {
540 value = RT_IDX_DST_DFLT_Q |
541 RT_IDX_TYPE_NICQ |
542 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);
543 break;
544 }
545 case RT_IDX_MCAST:
546 {
547 value = RT_IDX_DST_DFLT_Q |
548 RT_IDX_TYPE_NICQ |
549 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);
550 break;
551 }
552 case RT_IDX_MCAST_MATCH:
553 {
554 value = RT_IDX_DST_DFLT_Q |
555 RT_IDX_TYPE_NICQ |
556 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);
557 break;
558 }
559 case RT_IDX_RSS_MATCH:
560 {
561 value = RT_IDX_DST_RSS |
562 RT_IDX_TYPE_NICQ |
563 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);
564 break;
565 }
566 case 0:
567 {
568 value = RT_IDX_DST_DFLT_Q |
569 RT_IDX_TYPE_NICQ |
570 (index << RT_IDX_IDX_SHIFT);
571 break;
572 }
573 default:
574 netif_err(qdev, ifup, qdev->ndev,
575 "Mask type %d not yet supported.\n", mask);
576 status = -EPERM;
577 goto exit;
578 }
579
580 if (value) {
581 status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
582 if (status)
583 goto exit;
584 value |= (enable ? RT_IDX_E : 0);
585 qlge_write32(qdev, RT_IDX, value);
586 qlge_write32(qdev, RT_DATA, enable ? mask : 0);
587 }
588exit:
589 return status;
590}
591
592static void qlge_enable_interrupts(struct qlge_adapter *qdev)
593{
594 qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
595}
596
597static void qlge_disable_interrupts(struct qlge_adapter *qdev)
598{
599 qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
600}
601
602static void qlge_enable_completion_interrupt(struct qlge_adapter *qdev, u32 intr)
603{
604 struct intr_context *ctx = &qdev->intr_context[intr];
605
606 qlge_write32(qdev, INTR_EN, ctx->intr_en_mask);
607}
608
609static void qlge_disable_completion_interrupt(struct qlge_adapter *qdev, u32 intr)
610{
611 struct intr_context *ctx = &qdev->intr_context[intr];
612
613 qlge_write32(qdev, INTR_EN, ctx->intr_dis_mask);
614}
615
616static void qlge_enable_all_completion_interrupts(struct qlge_adapter *qdev)
617{
618 int i;
619
620 for (i = 0; i < qdev->intr_count; i++)
621 qlge_enable_completion_interrupt(qdev, i);
622}
623
624static int qlge_validate_flash(struct qlge_adapter *qdev, u32 size, const char *str)
625{
626 int status, i;
627 u16 csum = 0;
628 __le16 *flash = (__le16 *)&qdev->flash;
629
630 status = strncmp((char *)&qdev->flash, str, 4);
631 if (status) {
632 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
633 return status;
634 }
635
636 for (i = 0; i < size; i++)
637 csum += le16_to_cpu(*flash++);
638
639 if (csum)
640 netif_err(qdev, ifup, qdev->ndev,
641 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
642
643 return csum;
644}
645
646static int qlge_read_flash_word(struct qlge_adapter *qdev, int offset, __le32 *data)
647{
648 int status = 0;
649
650 status = qlge_wait_reg_rdy(qdev,
651 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
652 if (status)
653 goto exit;
654
655 qlge_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
656
657 status = qlge_wait_reg_rdy(qdev,
658 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
659 if (status)
660 goto exit;
661
662
663
664
665 *data = cpu_to_le32(qlge_read32(qdev, FLASH_DATA));
666exit:
667 return status;
668}
669
670static int qlge_get_8000_flash_params(struct qlge_adapter *qdev)
671{
672 u32 i, size;
673 int status;
674 __le32 *p = (__le32 *)&qdev->flash;
675 u32 offset;
676 u8 mac_addr[6];
677
678
679
680
681 if (!qdev->port)
682 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
683 else
684 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
685
686 if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK))
687 return -ETIMEDOUT;
688
689 size = sizeof(struct flash_params_8000) / sizeof(u32);
690 for (i = 0; i < size; i++, p++) {
691 status = qlge_read_flash_word(qdev, i + offset, p);
692 if (status) {
693 netif_err(qdev, ifup, qdev->ndev,
694 "Error reading flash.\n");
695 goto exit;
696 }
697 }
698
699 status = qlge_validate_flash(qdev,
700 sizeof(struct flash_params_8000) /
701 sizeof(u16),
702 "8000");
703 if (status) {
704 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
705 status = -EINVAL;
706 goto exit;
707 }
708
709
710
711
712 if (qdev->flash.flash_params_8000.data_type1 == 2)
713 memcpy(mac_addr,
714 qdev->flash.flash_params_8000.mac_addr1,
715 qdev->ndev->addr_len);
716 else
717 memcpy(mac_addr,
718 qdev->flash.flash_params_8000.mac_addr,
719 qdev->ndev->addr_len);
720
721 if (!is_valid_ether_addr(mac_addr)) {
722 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
723 status = -EINVAL;
724 goto exit;
725 }
726
727 memcpy(qdev->ndev->dev_addr,
728 mac_addr,
729 qdev->ndev->addr_len);
730
731exit:
732 qlge_sem_unlock(qdev, SEM_FLASH_MASK);
733 return status;
734}
735
736static int qlge_get_8012_flash_params(struct qlge_adapter *qdev)
737{
738 int i;
739 int status;
740 __le32 *p = (__le32 *)&qdev->flash;
741 u32 offset = 0;
742 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
743
744
745
746
747 if (qdev->port)
748 offset = size;
749
750 if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK))
751 return -ETIMEDOUT;
752
753 for (i = 0; i < size; i++, p++) {
754 status = qlge_read_flash_word(qdev, i + offset, p);
755 if (status) {
756 netif_err(qdev, ifup, qdev->ndev,
757 "Error reading flash.\n");
758 goto exit;
759 }
760 }
761
762 status = qlge_validate_flash(qdev,
763 sizeof(struct flash_params_8012) /
764 sizeof(u16),
765 "8012");
766 if (status) {
767 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
768 status = -EINVAL;
769 goto exit;
770 }
771
772 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
773 status = -EINVAL;
774 goto exit;
775 }
776
777 memcpy(qdev->ndev->dev_addr,
778 qdev->flash.flash_params_8012.mac_addr,
779 qdev->ndev->addr_len);
780
781exit:
782 qlge_sem_unlock(qdev, SEM_FLASH_MASK);
783 return status;
784}
785
786
787
788
789
790static int qlge_write_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 data)
791{
792 int status;
793
794 status = qlge_wait_reg_rdy(qdev,
795 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
796 if (status)
797 return status;
798
799 qlge_write32(qdev, XGMAC_DATA, data);
800
801 qlge_write32(qdev, XGMAC_ADDR, reg);
802 return status;
803}
804
805
806
807
808
809int qlge_read_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 *data)
810{
811 int status = 0;
812
813 status = qlge_wait_reg_rdy(qdev,
814 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
815 if (status)
816 goto exit;
817
818 qlge_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
819
820 status = qlge_wait_reg_rdy(qdev,
821 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
822 if (status)
823 goto exit;
824
825 *data = qlge_read32(qdev, XGMAC_DATA);
826exit:
827 return status;
828}
829
830
831int qlge_read_xgmac_reg64(struct qlge_adapter *qdev, u32 reg, u64 *data)
832{
833 int status = 0;
834 u32 hi = 0;
835 u32 lo = 0;
836
837 status = qlge_read_xgmac_reg(qdev, reg, &lo);
838 if (status)
839 goto exit;
840
841 status = qlge_read_xgmac_reg(qdev, reg + 4, &hi);
842 if (status)
843 goto exit;
844
845 *data = (u64)lo | ((u64)hi << 32);
846
847exit:
848 return status;
849}
850
851static int qlge_8000_port_initialize(struct qlge_adapter *qdev)
852{
853 int status;
854
855
856
857
858 status = qlge_mb_about_fw(qdev);
859 if (status)
860 goto exit;
861 status = qlge_mb_get_fw_state(qdev);
862 if (status)
863 goto exit;
864
865 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
866exit:
867 return status;
868}
869
870
871
872
873
874
875
876static int qlge_8012_port_initialize(struct qlge_adapter *qdev)
877{
878 int status = 0;
879 u32 data;
880
881 if (qlge_sem_trylock(qdev, qdev->xg_sem_mask)) {
882
883
884
885 netif_info(qdev, link, qdev->ndev,
886 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
887 status = qlge_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
888 if (status) {
889 netif_crit(qdev, link, qdev->ndev,
890 "Port initialize timed out.\n");
891 }
892 return status;
893 }
894
895 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
896
897 status = qlge_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
898 if (status)
899 goto end;
900 data |= GLOBAL_CFG_RESET;
901 status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data);
902 if (status)
903 goto end;
904
905
906 data &= ~GLOBAL_CFG_RESET;
907 data |= GLOBAL_CFG_JUMBO;
908 data |= GLOBAL_CFG_TX_STAT_EN;
909 data |= GLOBAL_CFG_RX_STAT_EN;
910 status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data);
911 if (status)
912 goto end;
913
914
915 status = qlge_read_xgmac_reg(qdev, TX_CFG, &data);
916 if (status)
917 goto end;
918 data &= ~TX_CFG_RESET;
919 data |= TX_CFG_EN;
920 status = qlge_write_xgmac_reg(qdev, TX_CFG, data);
921 if (status)
922 goto end;
923
924
925 status = qlge_read_xgmac_reg(qdev, RX_CFG, &data);
926 if (status)
927 goto end;
928 data &= ~RX_CFG_RESET;
929 data |= RX_CFG_EN;
930 status = qlge_write_xgmac_reg(qdev, RX_CFG, data);
931 if (status)
932 goto end;
933
934
935 status =
936 qlge_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
937 if (status)
938 goto end;
939 status =
940 qlge_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
941 if (status)
942 goto end;
943
944
945 qlge_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
946end:
947 qlge_sem_unlock(qdev, qdev->xg_sem_mask);
948 return status;
949}
950
951static inline unsigned int qlge_lbq_block_size(struct qlge_adapter *qdev)
952{
953 return PAGE_SIZE << qdev->lbq_buf_order;
954}
955
956static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)
957{
958 struct qlge_bq_desc *bq_desc;
959
960 bq_desc = &bq->queue[bq->next_to_clean];
961 bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1);
962
963 return bq_desc;
964}
965
966static struct qlge_bq_desc *qlge_get_curr_lchunk(struct qlge_adapter *qdev,
967 struct rx_ring *rx_ring)
968{
969 struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);
970
971 dma_sync_single_for_cpu(&qdev->pdev->dev, lbq_desc->dma_addr,
972 qdev->lbq_buf_size, DMA_FROM_DEVICE);
973
974 if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
975 qlge_lbq_block_size(qdev)) {
976
977 dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
978 qlge_lbq_block_size(qdev), DMA_FROM_DEVICE);
979 }
980
981 return lbq_desc;
982}
983
984
985static void qlge_update_cq(struct rx_ring *rx_ring)
986{
987 rx_ring->cnsmr_idx++;
988 rx_ring->curr_entry++;
989 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
990 rx_ring->cnsmr_idx = 0;
991 rx_ring->curr_entry = rx_ring->cq_base;
992 }
993}
994
995static void qlge_write_cq_idx(struct rx_ring *rx_ring)
996{
997 qlge_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
998}
999
1000static const char * const bq_type_name[] = {
1001 [QLGE_SB] = "sbq",
1002 [QLGE_LB] = "lbq",
1003};
1004
1005
1006static int qlge_refill_sb(struct rx_ring *rx_ring,
1007 struct qlge_bq_desc *sbq_desc, gfp_t gfp)
1008{
1009 struct qlge_adapter *qdev = rx_ring->qdev;
1010 struct sk_buff *skb;
1011
1012 if (sbq_desc->p.skb)
1013 return 0;
1014
1015 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1016 "ring %u sbq: getting new skb for index %d.\n",
1017 rx_ring->cq_id, sbq_desc->index);
1018
1019 skb = __netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE, gfp);
1020 if (!skb)
1021 return -ENOMEM;
1022 skb_reserve(skb, QLGE_SB_PAD);
1023
1024 sbq_desc->dma_addr = dma_map_single(&qdev->pdev->dev, skb->data,
1025 SMALL_BUF_MAP_SIZE,
1026 DMA_FROM_DEVICE);
1027 if (dma_mapping_error(&qdev->pdev->dev, sbq_desc->dma_addr)) {
1028 netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
1029 dev_kfree_skb_any(skb);
1030 return -EIO;
1031 }
1032 *sbq_desc->buf_ptr = cpu_to_le64(sbq_desc->dma_addr);
1033
1034 sbq_desc->p.skb = skb;
1035 return 0;
1036}
1037
1038
1039static int qlge_refill_lb(struct rx_ring *rx_ring,
1040 struct qlge_bq_desc *lbq_desc, gfp_t gfp)
1041{
1042 struct qlge_adapter *qdev = rx_ring->qdev;
1043 struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk;
1044
1045 if (!master_chunk->page) {
1046 struct page *page;
1047 dma_addr_t dma_addr;
1048
1049 page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
1050 if (unlikely(!page))
1051 return -ENOMEM;
1052 dma_addr = dma_map_page(&qdev->pdev->dev, page, 0,
1053 qlge_lbq_block_size(qdev),
1054 DMA_FROM_DEVICE);
1055 if (dma_mapping_error(&qdev->pdev->dev, dma_addr)) {
1056 __free_pages(page, qdev->lbq_buf_order);
1057 netif_err(qdev, drv, qdev->ndev,
1058 "PCI mapping failed.\n");
1059 return -EIO;
1060 }
1061 master_chunk->page = page;
1062 master_chunk->va = page_address(page);
1063 master_chunk->offset = 0;
1064 rx_ring->chunk_dma_addr = dma_addr;
1065 }
1066
1067 lbq_desc->p.pg_chunk = *master_chunk;
1068 lbq_desc->dma_addr = rx_ring->chunk_dma_addr;
1069 *lbq_desc->buf_ptr = cpu_to_le64(lbq_desc->dma_addr +
1070 lbq_desc->p.pg_chunk.offset);
1071
1072
1073
1074
1075 master_chunk->offset += qdev->lbq_buf_size;
1076 if (master_chunk->offset == qlge_lbq_block_size(qdev)) {
1077 master_chunk->page = NULL;
1078 } else {
1079 master_chunk->va += qdev->lbq_buf_size;
1080 get_page(master_chunk->page);
1081 }
1082
1083 return 0;
1084}
1085
1086
1087static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
1088{
1089 struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
1090 struct qlge_adapter *qdev = rx_ring->qdev;
1091 struct qlge_bq_desc *bq_desc;
1092 int refill_count;
1093 int retval;
1094 int i;
1095
1096 refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -
1097 bq->next_to_use);
1098 if (!refill_count)
1099 return 0;
1100
1101 i = bq->next_to_use;
1102 bq_desc = &bq->queue[i];
1103 i -= QLGE_BQ_LEN;
1104 do {
1105 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1106 "ring %u %s: try cleaning idx %d\n",
1107 rx_ring->cq_id, bq_type_name[bq->type], i);
1108
1109 if (bq->type == QLGE_SB)
1110 retval = qlge_refill_sb(rx_ring, bq_desc, gfp);
1111 else
1112 retval = qlge_refill_lb(rx_ring, bq_desc, gfp);
1113 if (retval < 0) {
1114 netif_err(qdev, ifup, qdev->ndev,
1115 "ring %u %s: Could not get a page chunk, idx %d\n",
1116 rx_ring->cq_id, bq_type_name[bq->type], i);
1117 break;
1118 }
1119
1120 bq_desc++;
1121 i++;
1122 if (unlikely(!i)) {
1123 bq_desc = &bq->queue[0];
1124 i -= QLGE_BQ_LEN;
1125 }
1126 refill_count--;
1127 } while (refill_count);
1128 i += QLGE_BQ_LEN;
1129
1130 if (bq->next_to_use != i) {
1131 if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
1132 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1133 "ring %u %s: updating prod idx = %d.\n",
1134 rx_ring->cq_id, bq_type_name[bq->type],
1135 i);
1136 qlge_write_db_reg(i, bq->prod_idx_db_reg);
1137 }
1138 bq->next_to_use = i;
1139 }
1140
1141 return retval;
1142}
1143
1144static void qlge_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
1145 unsigned long delay)
1146{
1147 bool sbq_fail, lbq_fail;
1148
1149 sbq_fail = !!qlge_refill_bq(&rx_ring->sbq, gfp);
1150 lbq_fail = !!qlge_refill_bq(&rx_ring->lbq, gfp);
1151
1152
1153
1154
1155
1156
1157
1158 if ((sbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->sbq) < 2) ||
1159 (lbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->lbq) <
1160 DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE)))
1161
1162
1163
1164
1165 queue_delayed_work_on(smp_processor_id(), system_long_wq,
1166 &rx_ring->refill_work, delay);
1167}
1168
1169static void qlge_slow_refill(struct work_struct *work)
1170{
1171 struct rx_ring *rx_ring = container_of(work, struct rx_ring,
1172 refill_work.work);
1173 struct napi_struct *napi = &rx_ring->napi;
1174
1175 napi_disable(napi);
1176 qlge_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);
1177 napi_enable(napi);
1178
1179 local_bh_disable();
1180
1181
1182
1183 napi_schedule(napi);
1184
1185 local_bh_enable();
1186}
1187
1188
1189
1190
1191static void qlge_unmap_send(struct qlge_adapter *qdev,
1192 struct tx_ring_desc *tx_ring_desc, int mapped)
1193{
1194 int i;
1195
1196 for (i = 0; i < mapped; i++) {
1197 if (i == 0 || (i == 7 && mapped > 7)) {
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207 if (i == 7) {
1208 netif_printk(qdev, tx_done, KERN_DEBUG,
1209 qdev->ndev,
1210 "unmapping OAL area.\n");
1211 }
1212 dma_unmap_single(&qdev->pdev->dev,
1213 dma_unmap_addr(&tx_ring_desc->map[i],
1214 mapaddr),
1215 dma_unmap_len(&tx_ring_desc->map[i],
1216 maplen),
1217 DMA_TO_DEVICE);
1218 } else {
1219 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1220 "unmapping frag %d.\n", i);
1221 dma_unmap_page(&qdev->pdev->dev,
1222 dma_unmap_addr(&tx_ring_desc->map[i],
1223 mapaddr),
1224 dma_unmap_len(&tx_ring_desc->map[i],
1225 maplen), DMA_TO_DEVICE);
1226 }
1227 }
1228}
1229
1230
1231
1232
1233static int qlge_map_send(struct qlge_adapter *qdev,
1234 struct qlge_ob_mac_iocb_req *mac_iocb_ptr,
1235 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1236{
1237 int len = skb_headlen(skb);
1238 dma_addr_t map;
1239 int frag_idx, err, map_idx = 0;
1240 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1241 int frag_cnt = skb_shinfo(skb)->nr_frags;
1242
1243 if (frag_cnt) {
1244 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1245 "frag_cnt = %d.\n", frag_cnt);
1246 }
1247
1248
1249
1250 map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
1251
1252 err = dma_mapping_error(&qdev->pdev->dev, map);
1253 if (err) {
1254 netif_err(qdev, tx_queued, qdev->ndev,
1255 "PCI mapping failed with error: %d\n", err);
1256
1257 return NETDEV_TX_BUSY;
1258 }
1259
1260 tbd->len = cpu_to_le32(len);
1261 tbd->addr = cpu_to_le64(map);
1262 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1263 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1264 map_idx++;
1265
1266
1267
1268
1269
1270
1271
1272
1273 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1274 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1275
1276 tbd++;
1277 if (frag_idx == 6 && frag_cnt > 7) {
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297 map = dma_map_single(&qdev->pdev->dev, &tx_ring_desc->oal,
1298 sizeof(struct qlge_oal),
1299 DMA_TO_DEVICE);
1300 err = dma_mapping_error(&qdev->pdev->dev, map);
1301 if (err) {
1302 netif_err(qdev, tx_queued, qdev->ndev,
1303 "PCI mapping outbound address list with error: %d\n",
1304 err);
1305 goto map_error;
1306 }
1307
1308 tbd->addr = cpu_to_le64(map);
1309
1310
1311
1312
1313
1314 tbd->len =
1315 cpu_to_le32((sizeof(struct tx_buf_desc) *
1316 (frag_cnt - frag_idx)) | TX_DESC_C);
1317 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1318 map);
1319 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1320 sizeof(struct qlge_oal));
1321 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1322 map_idx++;
1323 }
1324
1325 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1326 DMA_TO_DEVICE);
1327
1328 err = dma_mapping_error(&qdev->pdev->dev, map);
1329 if (err) {
1330 netif_err(qdev, tx_queued, qdev->ndev,
1331 "PCI mapping frags failed with error: %d.\n",
1332 err);
1333 goto map_error;
1334 }
1335
1336 tbd->addr = cpu_to_le64(map);
1337 tbd->len = cpu_to_le32(skb_frag_size(frag));
1338 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1339 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1340 skb_frag_size(frag));
1341 }
1342
1343 tx_ring_desc->map_cnt = map_idx;
1344
1345 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1346 return NETDEV_TX_OK;
1347
1348map_error:
1349
1350
1351
1352
1353
1354
1355 qlge_unmap_send(qdev, tx_ring_desc, map_idx);
1356 return NETDEV_TX_BUSY;
1357}
1358
1359
1360static void qlge_categorize_rx_err(struct qlge_adapter *qdev, u8 rx_err,
1361 struct rx_ring *rx_ring)
1362{
1363 struct nic_stats *stats = &qdev->nic_stats;
1364
1365 stats->rx_err_count++;
1366 rx_ring->rx_errors++;
1367
1368 switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1369 case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1370 stats->rx_code_err++;
1371 break;
1372 case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1373 stats->rx_oversize_err++;
1374 break;
1375 case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1376 stats->rx_undersize_err++;
1377 break;
1378 case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1379 stats->rx_preamble_err++;
1380 break;
1381 case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1382 stats->rx_frame_len_err++;
1383 break;
1384 case IB_MAC_IOCB_RSP_ERR_CRC:
1385 stats->rx_crc_err++;
1386 break;
1387 default:
1388 break;
1389 }
1390}
1391
1392
1393
1394
1395
1396static void qlge_update_mac_hdr_len(struct qlge_adapter *qdev,
1397 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1398 void *page, size_t *len)
1399{
1400 u16 *tags;
1401
1402 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1403 return;
1404 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1405 tags = (u16 *)page;
1406
1407 if (tags[6] == ETH_P_8021Q &&
1408 tags[8] == ETH_P_8021Q)
1409 *len += 2 * VLAN_HLEN;
1410 else
1411 *len += VLAN_HLEN;
1412 }
1413}
1414
1415
1416static void qlge_process_mac_rx_gro_page(struct qlge_adapter *qdev,
1417 struct rx_ring *rx_ring,
1418 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1419 u32 length, u16 vlan_id)
1420{
1421 struct sk_buff *skb;
1422 struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1423 struct napi_struct *napi = &rx_ring->napi;
1424
1425
1426 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1427 qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1428 put_page(lbq_desc->p.pg_chunk.page);
1429 return;
1430 }
1431 napi->dev = qdev->ndev;
1432
1433 skb = napi_get_frags(napi);
1434 if (!skb) {
1435 netif_err(qdev, drv, qdev->ndev,
1436 "Couldn't get an skb, exiting.\n");
1437 rx_ring->rx_dropped++;
1438 put_page(lbq_desc->p.pg_chunk.page);
1439 return;
1440 }
1441 prefetch(lbq_desc->p.pg_chunk.va);
1442 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1443 lbq_desc->p.pg_chunk.page,
1444 lbq_desc->p.pg_chunk.offset,
1445 length);
1446
1447 skb->len += length;
1448 skb->data_len += length;
1449 skb->truesize += length;
1450 skb_shinfo(skb)->nr_frags++;
1451
1452 rx_ring->rx_packets++;
1453 rx_ring->rx_bytes += length;
1454 skb->ip_summed = CHECKSUM_UNNECESSARY;
1455 skb_record_rx_queue(skb, rx_ring->cq_id);
1456 if (vlan_id != 0xffff)
1457 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1458 napi_gro_frags(napi);
1459}
1460
1461
1462static void qlge_process_mac_rx_page(struct qlge_adapter *qdev,
1463 struct rx_ring *rx_ring,
1464 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1465 u32 length, u16 vlan_id)
1466{
1467 struct net_device *ndev = qdev->ndev;
1468 struct sk_buff *skb = NULL;
1469 void *addr;
1470 struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1471 struct napi_struct *napi = &rx_ring->napi;
1472 size_t hlen = ETH_HLEN;
1473
1474 skb = netdev_alloc_skb(ndev, length);
1475 if (!skb) {
1476 rx_ring->rx_dropped++;
1477 put_page(lbq_desc->p.pg_chunk.page);
1478 return;
1479 }
1480
1481 addr = lbq_desc->p.pg_chunk.va;
1482 prefetch(addr);
1483
1484
1485 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1486 qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1487 goto err_out;
1488 }
1489
1490
1491 qlge_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1492
1493
1494
1495
1496 if (skb->len > ndev->mtu + hlen) {
1497 netif_err(qdev, drv, qdev->ndev,
1498 "Segment too small, dropping.\n");
1499 rx_ring->rx_dropped++;
1500 goto err_out;
1501 }
1502 skb_put_data(skb, addr, hlen);
1503 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1504 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1505 length);
1506 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1507 lbq_desc->p.pg_chunk.offset + hlen, length - hlen);
1508 skb->len += length - hlen;
1509 skb->data_len += length - hlen;
1510 skb->truesize += length - hlen;
1511
1512 rx_ring->rx_packets++;
1513 rx_ring->rx_bytes += skb->len;
1514 skb->protocol = eth_type_trans(skb, ndev);
1515 skb_checksum_none_assert(skb);
1516
1517 if ((ndev->features & NETIF_F_RXCSUM) &&
1518 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1519
1520 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1521 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1522 "TCP checksum done!\n");
1523 skb->ip_summed = CHECKSUM_UNNECESSARY;
1524 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1525 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1526
1527 struct iphdr *iph =
1528 (struct iphdr *)((u8 *)addr + hlen);
1529 if (!(iph->frag_off &
1530 htons(IP_MF | IP_OFFSET))) {
1531 skb->ip_summed = CHECKSUM_UNNECESSARY;
1532 netif_printk(qdev, rx_status, KERN_DEBUG,
1533 qdev->ndev,
1534 "UDP checksum done!\n");
1535 }
1536 }
1537 }
1538
1539 skb_record_rx_queue(skb, rx_ring->cq_id);
1540 if (vlan_id != 0xffff)
1541 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1542 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1543 napi_gro_receive(napi, skb);
1544 else
1545 netif_receive_skb(skb);
1546 return;
1547err_out:
1548 dev_kfree_skb_any(skb);
1549 put_page(lbq_desc->p.pg_chunk.page);
1550}
1551
1552
1553static void qlge_process_mac_rx_skb(struct qlge_adapter *qdev,
1554 struct rx_ring *rx_ring,
1555 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1556 u32 length, u16 vlan_id)
1557{
1558 struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1559 struct net_device *ndev = qdev->ndev;
1560 struct sk_buff *skb, *new_skb;
1561
1562 skb = sbq_desc->p.skb;
1563
1564 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1565 if (!new_skb) {
1566 rx_ring->rx_dropped++;
1567 return;
1568 }
1569 skb_reserve(new_skb, NET_IP_ALIGN);
1570
1571 dma_sync_single_for_cpu(&qdev->pdev->dev, sbq_desc->dma_addr,
1572 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
1573
1574 skb_put_data(new_skb, skb->data, length);
1575
1576 skb = new_skb;
1577
1578
1579 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1580 qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1581 dev_kfree_skb_any(skb);
1582 return;
1583 }
1584
1585
1586 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1587 qlge_check_lb_frame(qdev, skb);
1588 dev_kfree_skb_any(skb);
1589 return;
1590 }
1591
1592
1593
1594
1595 if (skb->len > ndev->mtu + ETH_HLEN) {
1596 dev_kfree_skb_any(skb);
1597 rx_ring->rx_dropped++;
1598 return;
1599 }
1600
1601 prefetch(skb->data);
1602 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1603 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1604 "%s Multicast.\n",
1605 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1606 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1607 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1608 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1609 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1610 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1611 }
1612 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1613 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1614 "Promiscuous Packet.\n");
1615
1616 rx_ring->rx_packets++;
1617 rx_ring->rx_bytes += skb->len;
1618 skb->protocol = eth_type_trans(skb, ndev);
1619 skb_checksum_none_assert(skb);
1620
1621
1622
1623
1624 if ((ndev->features & NETIF_F_RXCSUM) &&
1625 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1626
1627 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1628 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1629 "TCP checksum done!\n");
1630 skb->ip_summed = CHECKSUM_UNNECESSARY;
1631 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1632 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1633
1634 struct iphdr *iph = (struct iphdr *)skb->data;
1635
1636 if (!(iph->frag_off &
1637 htons(IP_MF | IP_OFFSET))) {
1638 skb->ip_summed = CHECKSUM_UNNECESSARY;
1639 netif_printk(qdev, rx_status, KERN_DEBUG,
1640 qdev->ndev,
1641 "UDP checksum done!\n");
1642 }
1643 }
1644 }
1645
1646 skb_record_rx_queue(skb, rx_ring->cq_id);
1647 if (vlan_id != 0xffff)
1648 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1649 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1650 napi_gro_receive(&rx_ring->napi, skb);
1651 else
1652 netif_receive_skb(skb);
1653}
1654
1655static void qlge_realign_skb(struct sk_buff *skb, int len)
1656{
1657 void *temp_addr = skb->data;
1658
1659
1660
1661
1662
1663 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1664 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1665 memmove(skb->data, temp_addr, len);
1666}
1667
1668
1669
1670
1671
1672
1673static struct sk_buff *qlge_build_rx_skb(struct qlge_adapter *qdev,
1674 struct rx_ring *rx_ring,
1675 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp)
1676{
1677 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1678 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1679 struct qlge_bq_desc *lbq_desc, *sbq_desc;
1680 struct sk_buff *skb = NULL;
1681 size_t hlen = ETH_HLEN;
1682
1683
1684
1685
1686 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1687 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1688 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1689 "Header of %d bytes in small buffer.\n", hdr_len);
1690
1691
1692
1693 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1694 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
1695 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
1696 skb = sbq_desc->p.skb;
1697 qlge_realign_skb(skb, hdr_len);
1698 skb_put(skb, hdr_len);
1699 sbq_desc->p.skb = NULL;
1700 }
1701
1702
1703
1704
1705 if (unlikely(!length)) {
1706 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1707 "No Data buffer in this packet.\n");
1708 return skb;
1709 }
1710
1711 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1712 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1713 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1714 "Headers in small, data of %d bytes in small, combine them.\n",
1715 length);
1716
1717
1718
1719
1720
1721
1722
1723 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1724 dma_sync_single_for_cpu(&qdev->pdev->dev,
1725 sbq_desc->dma_addr,
1726 SMALL_BUF_MAP_SIZE,
1727 DMA_FROM_DEVICE);
1728 skb_put_data(skb, sbq_desc->p.skb->data, length);
1729 } else {
1730 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1731 "%d bytes in a single small buffer.\n",
1732 length);
1733 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1734 skb = sbq_desc->p.skb;
1735 qlge_realign_skb(skb, length);
1736 skb_put(skb, length);
1737 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
1738 SMALL_BUF_MAP_SIZE,
1739 DMA_FROM_DEVICE);
1740 sbq_desc->p.skb = NULL;
1741 }
1742 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1743 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1744 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1745 "Header in small, %d bytes in large. Chain large to small!\n",
1746 length);
1747
1748
1749
1750
1751
1752 lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1753 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1754 "Chaining page at offset = %d, for %d bytes to skb.\n",
1755 lbq_desc->p.pg_chunk.offset, length);
1756 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1757 lbq_desc->p.pg_chunk.offset, length);
1758 skb->len += length;
1759 skb->data_len += length;
1760 skb->truesize += length;
1761 } else {
1762
1763
1764
1765
1766
1767 lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1768 skb = netdev_alloc_skb(qdev->ndev, length);
1769 if (!skb) {
1770 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1771 "No skb available, drop the packet.\n");
1772 return NULL;
1773 }
1774 dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
1775 qdev->lbq_buf_size,
1776 DMA_FROM_DEVICE);
1777 skb_reserve(skb, NET_IP_ALIGN);
1778 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1779 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1780 length);
1781 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1782 lbq_desc->p.pg_chunk.offset,
1783 length);
1784 skb->len += length;
1785 skb->data_len += length;
1786 skb->truesize += length;
1787 qlge_update_mac_hdr_len(qdev, ib_mac_rsp,
1788 lbq_desc->p.pg_chunk.va,
1789 &hlen);
1790 __pskb_pull_tail(skb, hlen);
1791 }
1792 } else {
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804 int size, i = 0;
1805
1806 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1807 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
1808 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
1809 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1820 "%d bytes of headers & data in chain of large.\n",
1821 length);
1822 skb = sbq_desc->p.skb;
1823 sbq_desc->p.skb = NULL;
1824 skb_reserve(skb, NET_IP_ALIGN);
1825 }
1826 do {
1827 lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1828 size = min(length, qdev->lbq_buf_size);
1829
1830 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1831 "Adding page %d to skb for %d bytes.\n",
1832 i, size);
1833 skb_fill_page_desc(skb, i,
1834 lbq_desc->p.pg_chunk.page,
1835 lbq_desc->p.pg_chunk.offset, size);
1836 skb->len += size;
1837 skb->data_len += size;
1838 skb->truesize += size;
1839 length -= size;
1840 i++;
1841 } while (length > 0);
1842 qlge_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1843 &hlen);
1844 __pskb_pull_tail(skb, hlen);
1845 }
1846 return skb;
1847}
1848
1849
1850static void qlge_process_mac_split_rx_intr(struct qlge_adapter *qdev,
1851 struct rx_ring *rx_ring,
1852 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1853 u16 vlan_id)
1854{
1855 struct net_device *ndev = qdev->ndev;
1856 struct sk_buff *skb = NULL;
1857
1858 skb = qlge_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1859 if (unlikely(!skb)) {
1860 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1861 "No skb available, drop packet.\n");
1862 rx_ring->rx_dropped++;
1863 return;
1864 }
1865
1866
1867 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1868 qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1869 dev_kfree_skb_any(skb);
1870 return;
1871 }
1872
1873
1874
1875
1876 if (skb->len > ndev->mtu + ETH_HLEN) {
1877 dev_kfree_skb_any(skb);
1878 rx_ring->rx_dropped++;
1879 return;
1880 }
1881
1882
1883 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1884 qlge_check_lb_frame(qdev, skb);
1885 dev_kfree_skb_any(skb);
1886 return;
1887 }
1888
1889 prefetch(skb->data);
1890 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1891 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1892 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1893 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1894 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1895 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1896 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1897 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1898 rx_ring->rx_multicast++;
1899 }
1900 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1901 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1902 "Promiscuous Packet.\n");
1903 }
1904
1905 skb->protocol = eth_type_trans(skb, ndev);
1906 skb_checksum_none_assert(skb);
1907
1908
1909
1910
1911 if ((ndev->features & NETIF_F_RXCSUM) &&
1912 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1913
1914 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1915 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1916 "TCP checksum done!\n");
1917 skb->ip_summed = CHECKSUM_UNNECESSARY;
1918 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1919 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1920
1921 struct iphdr *iph = (struct iphdr *)skb->data;
1922
1923 if (!(iph->frag_off &
1924 htons(IP_MF | IP_OFFSET))) {
1925 skb->ip_summed = CHECKSUM_UNNECESSARY;
1926 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1927 "TCP checksum done!\n");
1928 }
1929 }
1930 }
1931
1932 rx_ring->rx_packets++;
1933 rx_ring->rx_bytes += skb->len;
1934 skb_record_rx_queue(skb, rx_ring->cq_id);
1935 if (vlan_id != 0xffff)
1936 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1937 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1938 napi_gro_receive(&rx_ring->napi, skb);
1939 else
1940 netif_receive_skb(skb);
1941}
1942
1943
1944static unsigned long qlge_process_mac_rx_intr(struct qlge_adapter *qdev,
1945 struct rx_ring *rx_ring,
1946 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp)
1947{
1948 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1949 u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1950 (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
1951 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
1952 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
1953
1954 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
1955
1956
1957
1958 qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
1959 vlan_id);
1960 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1961
1962
1963
1964
1965 qlge_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length,
1966 vlan_id);
1967 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
1968 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
1969 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
1970
1971
1972
1973 qlge_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length,
1974 vlan_id);
1975 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1976
1977
1978
1979 qlge_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length,
1980 vlan_id);
1981 } else {
1982
1983
1984
1985 qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
1986 vlan_id);
1987 }
1988
1989 return (unsigned long)length;
1990}
1991
1992
1993static void qlge_process_mac_tx_intr(struct qlge_adapter *qdev,
1994 struct qlge_ob_mac_iocb_rsp *mac_rsp)
1995{
1996 struct tx_ring *tx_ring;
1997 struct tx_ring_desc *tx_ring_desc;
1998
1999 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2000 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2001 qlge_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2002 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2003 tx_ring->tx_packets++;
2004 dev_kfree_skb(tx_ring_desc->skb);
2005 tx_ring_desc->skb = NULL;
2006
2007 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2008 OB_MAC_IOCB_RSP_S |
2009 OB_MAC_IOCB_RSP_L |
2010 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2011 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2012 netif_warn(qdev, tx_done, qdev->ndev,
2013 "Total descriptor length did not match transfer length.\n");
2014 }
2015 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2016 netif_warn(qdev, tx_done, qdev->ndev,
2017 "Frame too short to be valid, not sent.\n");
2018 }
2019 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2020 netif_warn(qdev, tx_done, qdev->ndev,
2021 "Frame too long, but sent anyway.\n");
2022 }
2023 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2024 netif_warn(qdev, tx_done, qdev->ndev,
2025 "PCI backplane error. Frame not sent.\n");
2026 }
2027 }
2028 atomic_inc(&tx_ring->tx_count);
2029}
2030
2031
2032void qlge_queue_fw_error(struct qlge_adapter *qdev)
2033{
2034 qlge_link_off(qdev);
2035 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2036}
2037
2038void qlge_queue_asic_error(struct qlge_adapter *qdev)
2039{
2040 qlge_link_off(qdev);
2041 qlge_disable_interrupts(qdev);
2042
2043
2044
2045
2046 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2047
2048
2049
2050 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2051 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2052}
2053
2054static void qlge_process_chip_ae_intr(struct qlge_adapter *qdev,
2055 struct qlge_ib_ae_iocb_rsp *ib_ae_rsp)
2056{
2057 switch (ib_ae_rsp->event) {
2058 case MGMT_ERR_EVENT:
2059 netif_err(qdev, rx_err, qdev->ndev,
2060 "Management Processor Fatal Error.\n");
2061 qlge_queue_fw_error(qdev);
2062 return;
2063
2064 case CAM_LOOKUP_ERR_EVENT:
2065 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2066 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2067 qlge_queue_asic_error(qdev);
2068 return;
2069
2070 case SOFT_ECC_ERROR_EVENT:
2071 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2072 qlge_queue_asic_error(qdev);
2073 break;
2074
2075 case PCI_ERR_ANON_BUF_RD:
2076 netdev_err(qdev->ndev,
2077 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2078 ib_ae_rsp->q_id);
2079 qlge_queue_asic_error(qdev);
2080 break;
2081
2082 default:
2083 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2084 ib_ae_rsp->event);
2085 qlge_queue_asic_error(qdev);
2086 break;
2087 }
2088}
2089
2090static int qlge_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2091{
2092 struct qlge_adapter *qdev = rx_ring->qdev;
2093 u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
2094 struct qlge_ob_mac_iocb_rsp *net_rsp = NULL;
2095 int count = 0;
2096
2097 struct tx_ring *tx_ring;
2098
2099 while (prod != rx_ring->cnsmr_idx) {
2100 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2101 "cq_id = %d, prod = %d, cnsmr = %d\n",
2102 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2103
2104 net_rsp = (struct qlge_ob_mac_iocb_rsp *)rx_ring->curr_entry;
2105 rmb();
2106 switch (net_rsp->opcode) {
2107 case OPCODE_OB_MAC_TSO_IOCB:
2108 case OPCODE_OB_MAC_IOCB:
2109 qlge_process_mac_tx_intr(qdev, net_rsp);
2110 break;
2111 default:
2112 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2113 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2114 net_rsp->opcode);
2115 }
2116 count++;
2117 qlge_update_cq(rx_ring);
2118 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
2119 }
2120 if (!net_rsp)
2121 return 0;
2122 qlge_write_cq_idx(rx_ring);
2123 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2124 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2125 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2126
2127
2128
2129
2130 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2131 }
2132
2133 return count;
2134}
2135
2136static int qlge_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2137{
2138 struct qlge_adapter *qdev = rx_ring->qdev;
2139 u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
2140 struct qlge_net_rsp_iocb *net_rsp;
2141 int count = 0;
2142
2143
2144 while (prod != rx_ring->cnsmr_idx) {
2145 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2146 "cq_id = %d, prod = %d, cnsmr = %d\n",
2147 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2148
2149 net_rsp = rx_ring->curr_entry;
2150 rmb();
2151 switch (net_rsp->opcode) {
2152 case OPCODE_IB_MAC_IOCB:
2153 qlge_process_mac_rx_intr(qdev, rx_ring,
2154 (struct qlge_ib_mac_iocb_rsp *)
2155 net_rsp);
2156 break;
2157
2158 case OPCODE_IB_AE_IOCB:
2159 qlge_process_chip_ae_intr(qdev, (struct qlge_ib_ae_iocb_rsp *)
2160 net_rsp);
2161 break;
2162 default:
2163 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2164 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2165 net_rsp->opcode);
2166 break;
2167 }
2168 count++;
2169 qlge_update_cq(rx_ring);
2170 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
2171 if (count == budget)
2172 break;
2173 }
2174 qlge_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
2175 qlge_write_cq_idx(rx_ring);
2176 return count;
2177}
2178
2179static int qlge_napi_poll_msix(struct napi_struct *napi, int budget)
2180{
2181 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2182 struct qlge_adapter *qdev = rx_ring->qdev;
2183 struct rx_ring *trx_ring;
2184 int i, work_done = 0;
2185 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2186
2187 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2188 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2189
2190
2191
2192
2193 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2194 trx_ring = &qdev->rx_ring[i];
2195
2196
2197
2198 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2199 (qlge_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2200 trx_ring->cnsmr_idx)) {
2201 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2202 "%s: Servicing TX completion ring %d.\n",
2203 __func__, trx_ring->cq_id);
2204 qlge_clean_outbound_rx_ring(trx_ring);
2205 }
2206 }
2207
2208
2209
2210
2211 if (qlge_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2212 rx_ring->cnsmr_idx) {
2213 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2214 "%s: Servicing RX completion ring %d.\n",
2215 __func__, rx_ring->cq_id);
2216 work_done = qlge_clean_inbound_rx_ring(rx_ring, budget);
2217 }
2218
2219 if (work_done < budget) {
2220 napi_complete_done(napi, work_done);
2221 qlge_enable_completion_interrupt(qdev, rx_ring->irq);
2222 }
2223 return work_done;
2224}
2225
2226static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2227{
2228 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2229
2230 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2231 qlge_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2232 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2233 } else {
2234 qlge_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2235 }
2236}
2237
2238
2239
2240
2241
2242static int qlge_update_hw_vlan_features(struct net_device *ndev,
2243 netdev_features_t features)
2244{
2245 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2246 bool need_restart = netif_running(ndev);
2247 int status = 0;
2248
2249 if (need_restart) {
2250 status = qlge_adapter_down(qdev);
2251 if (status) {
2252 netif_err(qdev, link, qdev->ndev,
2253 "Failed to bring down the adapter\n");
2254 return status;
2255 }
2256 }
2257
2258
2259 ndev->features = features;
2260
2261 if (need_restart) {
2262 status = qlge_adapter_up(qdev);
2263 if (status) {
2264 netif_err(qdev, link, qdev->ndev,
2265 "Failed to bring up the adapter\n");
2266 return status;
2267 }
2268 }
2269
2270 return status;
2271}
2272
2273static int qlge_set_features(struct net_device *ndev,
2274 netdev_features_t features)
2275{
2276 netdev_features_t changed = ndev->features ^ features;
2277 int err;
2278
2279 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2280
2281 err = qlge_update_hw_vlan_features(ndev, features);
2282 if (err)
2283 return err;
2284
2285 qlge_vlan_mode(ndev, features);
2286 }
2287
2288 return 0;
2289}
2290
2291static int __qlge_vlan_rx_add_vid(struct qlge_adapter *qdev, u16 vid)
2292{
2293 u32 enable_bit = MAC_ADDR_E;
2294 int err;
2295
2296 err = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2297 MAC_ADDR_TYPE_VLAN, vid);
2298 if (err)
2299 netif_err(qdev, ifup, qdev->ndev,
2300 "Failed to init vlan address.\n");
2301 return err;
2302}
2303
2304static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2305{
2306 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2307 int status;
2308 int err;
2309
2310 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2311 if (status)
2312 return status;
2313
2314 err = __qlge_vlan_rx_add_vid(qdev, vid);
2315 set_bit(vid, qdev->active_vlans);
2316
2317 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2318
2319 return err;
2320}
2321
2322static int __qlge_vlan_rx_kill_vid(struct qlge_adapter *qdev, u16 vid)
2323{
2324 u32 enable_bit = 0;
2325 int err;
2326
2327 err = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2328 MAC_ADDR_TYPE_VLAN, vid);
2329 if (err)
2330 netif_err(qdev, ifup, qdev->ndev,
2331 "Failed to clear vlan address.\n");
2332 return err;
2333}
2334
2335static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2336{
2337 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2338 int status;
2339 int err;
2340
2341 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2342 if (status)
2343 return status;
2344
2345 err = __qlge_vlan_rx_kill_vid(qdev, vid);
2346 clear_bit(vid, qdev->active_vlans);
2347
2348 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2349
2350 return err;
2351}
2352
2353static void qlge_restore_vlan(struct qlge_adapter *qdev)
2354{
2355 int status;
2356 u16 vid;
2357
2358 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2359 if (status)
2360 return;
2361
2362 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2363 __qlge_vlan_rx_add_vid(qdev, vid);
2364
2365 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2366}
2367
2368
2369static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2370{
2371 struct rx_ring *rx_ring = dev_id;
2372
2373 napi_schedule(&rx_ring->napi);
2374 return IRQ_HANDLED;
2375}
2376
2377
2378
2379
2380
2381
2382static irqreturn_t qlge_isr(int irq, void *dev_id)
2383{
2384 struct rx_ring *rx_ring = dev_id;
2385 struct qlge_adapter *qdev = rx_ring->qdev;
2386 struct intr_context *intr_context = &qdev->intr_context[0];
2387 u32 var;
2388 int work_done = 0;
2389
2390
2391
2392
2393
2394
2395
2396 if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
2397 qlge_disable_completion_interrupt(qdev, 0);
2398
2399 var = qlge_read32(qdev, STS);
2400
2401
2402
2403
2404 if (var & STS_FE) {
2405 qlge_disable_completion_interrupt(qdev, 0);
2406 qlge_queue_asic_error(qdev);
2407 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2408 var = qlge_read32(qdev, ERR_STS);
2409 netdev_err(qdev->ndev, "Resetting chip. Error Status Register = 0x%x\n", var);
2410 return IRQ_HANDLED;
2411 }
2412
2413
2414
2415
2416 if ((var & STS_PI) &&
2417 (qlge_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2418
2419
2420
2421
2422 netif_err(qdev, intr, qdev->ndev,
2423 "Got MPI processor interrupt.\n");
2424 qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2425 queue_delayed_work_on(smp_processor_id(),
2426 qdev->workqueue, &qdev->mpi_work, 0);
2427 work_done++;
2428 }
2429
2430
2431
2432
2433
2434
2435 var = qlge_read32(qdev, ISR1);
2436 if (var & intr_context->irq_mask) {
2437 netif_info(qdev, intr, qdev->ndev,
2438 "Waking handler for rx_ring[0].\n");
2439 napi_schedule(&rx_ring->napi);
2440 work_done++;
2441 } else {
2442
2443
2444
2445
2446
2447
2448 qlge_enable_completion_interrupt(qdev, 0);
2449 }
2450
2451 return work_done ? IRQ_HANDLED : IRQ_NONE;
2452}
2453
2454static int qlge_tso(struct sk_buff *skb, struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr)
2455{
2456 if (skb_is_gso(skb)) {
2457 int err;
2458 __be16 l3_proto = vlan_get_protocol(skb);
2459
2460 err = skb_cow_head(skb, 0);
2461 if (err < 0)
2462 return err;
2463
2464 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2465 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2466 mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
2467 mac_iocb_ptr->total_hdrs_len =
2468 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2469 mac_iocb_ptr->net_trans_offset =
2470 cpu_to_le16(skb_network_offset(skb) |
2471 skb_transport_offset(skb)
2472 << OB_MAC_TRANSPORT_HDR_SHIFT);
2473 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2474 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2475 if (likely(l3_proto == htons(ETH_P_IP))) {
2476 struct iphdr *iph = ip_hdr(skb);
2477
2478 iph->check = 0;
2479 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2480 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2481 iph->daddr, 0,
2482 IPPROTO_TCP,
2483 0);
2484 } else if (l3_proto == htons(ETH_P_IPV6)) {
2485 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2486 tcp_hdr(skb)->check =
2487 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2488 &ipv6_hdr(skb)->daddr,
2489 0, IPPROTO_TCP, 0);
2490 }
2491 return 1;
2492 }
2493 return 0;
2494}
2495
2496static void qlge_hw_csum_setup(struct sk_buff *skb,
2497 struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr)
2498{
2499 int len;
2500 struct iphdr *iph = ip_hdr(skb);
2501 __sum16 *check;
2502
2503 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2504 mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
2505 mac_iocb_ptr->net_trans_offset =
2506 cpu_to_le16(skb_network_offset(skb) |
2507 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2508
2509 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2510 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2511 if (likely(iph->protocol == IPPROTO_TCP)) {
2512 check = &(tcp_hdr(skb)->check);
2513 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2514 mac_iocb_ptr->total_hdrs_len =
2515 cpu_to_le16(skb_transport_offset(skb) +
2516 (tcp_hdr(skb)->doff << 2));
2517 } else {
2518 check = &(udp_hdr(skb)->check);
2519 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2520 mac_iocb_ptr->total_hdrs_len =
2521 cpu_to_le16(skb_transport_offset(skb) +
2522 sizeof(struct udphdr));
2523 }
2524 *check = ~csum_tcpudp_magic(iph->saddr,
2525 iph->daddr, len, iph->protocol, 0);
2526}
2527
2528static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2529{
2530 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2531 struct qlge_ob_mac_iocb_req *mac_iocb_ptr;
2532 struct tx_ring_desc *tx_ring_desc;
2533 int tso;
2534 struct tx_ring *tx_ring;
2535 u32 tx_ring_idx = (u32)skb->queue_mapping;
2536
2537 tx_ring = &qdev->tx_ring[tx_ring_idx];
2538
2539 if (skb_padto(skb, ETH_ZLEN))
2540 return NETDEV_TX_OK;
2541
2542 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2543 netif_info(qdev, tx_queued, qdev->ndev,
2544 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2545 __func__, tx_ring_idx);
2546 netif_stop_subqueue(ndev, tx_ring->wq_id);
2547 tx_ring->tx_errors++;
2548 return NETDEV_TX_BUSY;
2549 }
2550 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2551 mac_iocb_ptr = tx_ring_desc->queue_entry;
2552 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2553
2554 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2555 mac_iocb_ptr->tid = tx_ring_desc->index;
2556
2557
2558
2559 mac_iocb_ptr->txq_idx = tx_ring_idx;
2560 tx_ring_desc->skb = skb;
2561
2562 mac_iocb_ptr->frame_len = cpu_to_le16((u16)skb->len);
2563
2564 if (skb_vlan_tag_present(skb)) {
2565 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2566 "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2567 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2568 mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2569 }
2570 tso = qlge_tso(skb, (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr);
2571 if (tso < 0) {
2572 dev_kfree_skb_any(skb);
2573 return NETDEV_TX_OK;
2574 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2575 qlge_hw_csum_setup(skb,
2576 (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr);
2577 }
2578 if (qlge_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2579 NETDEV_TX_OK) {
2580 netif_err(qdev, tx_queued, qdev->ndev,
2581 "Could not map the segments.\n");
2582 tx_ring->tx_errors++;
2583 return NETDEV_TX_BUSY;
2584 }
2585
2586 tx_ring->prod_idx++;
2587 if (tx_ring->prod_idx == tx_ring->wq_len)
2588 tx_ring->prod_idx = 0;
2589 wmb();
2590
2591 qlge_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2592 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2593 "tx queued, slot %d, len %d\n",
2594 tx_ring->prod_idx, skb->len);
2595
2596 atomic_dec(&tx_ring->tx_count);
2597
2598 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2599 netif_stop_subqueue(ndev, tx_ring->wq_id);
2600 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2601
2602
2603
2604
2605 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2606 }
2607 return NETDEV_TX_OK;
2608}
2609
2610static void qlge_free_shadow_space(struct qlge_adapter *qdev)
2611{
2612 if (qdev->rx_ring_shadow_reg_area) {
2613 dma_free_coherent(&qdev->pdev->dev,
2614 PAGE_SIZE,
2615 qdev->rx_ring_shadow_reg_area,
2616 qdev->rx_ring_shadow_reg_dma);
2617 qdev->rx_ring_shadow_reg_area = NULL;
2618 }
2619 if (qdev->tx_ring_shadow_reg_area) {
2620 dma_free_coherent(&qdev->pdev->dev,
2621 PAGE_SIZE,
2622 qdev->tx_ring_shadow_reg_area,
2623 qdev->tx_ring_shadow_reg_dma);
2624 qdev->tx_ring_shadow_reg_area = NULL;
2625 }
2626}
2627
2628static int qlge_alloc_shadow_space(struct qlge_adapter *qdev)
2629{
2630 qdev->rx_ring_shadow_reg_area =
2631 dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
2632 &qdev->rx_ring_shadow_reg_dma, GFP_ATOMIC);
2633 if (!qdev->rx_ring_shadow_reg_area) {
2634 netif_err(qdev, ifup, qdev->ndev,
2635 "Allocation of RX shadow space failed.\n");
2636 return -ENOMEM;
2637 }
2638
2639 qdev->tx_ring_shadow_reg_area =
2640 dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
2641 &qdev->tx_ring_shadow_reg_dma, GFP_ATOMIC);
2642 if (!qdev->tx_ring_shadow_reg_area) {
2643 netif_err(qdev, ifup, qdev->ndev,
2644 "Allocation of TX shadow space failed.\n");
2645 goto err_wqp_sh_area;
2646 }
2647 return 0;
2648
2649err_wqp_sh_area:
2650 dma_free_coherent(&qdev->pdev->dev,
2651 PAGE_SIZE,
2652 qdev->rx_ring_shadow_reg_area,
2653 qdev->rx_ring_shadow_reg_dma);
2654 return -ENOMEM;
2655}
2656
2657static void qlge_init_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring)
2658{
2659 struct tx_ring_desc *tx_ring_desc;
2660 int i;
2661 struct qlge_ob_mac_iocb_req *mac_iocb_ptr;
2662
2663 mac_iocb_ptr = tx_ring->wq_base;
2664 tx_ring_desc = tx_ring->q;
2665 for (i = 0; i < tx_ring->wq_len; i++) {
2666 tx_ring_desc->index = i;
2667 tx_ring_desc->skb = NULL;
2668 tx_ring_desc->queue_entry = mac_iocb_ptr;
2669 mac_iocb_ptr++;
2670 tx_ring_desc++;
2671 }
2672 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2673}
2674
2675static void qlge_free_tx_resources(struct qlge_adapter *qdev,
2676 struct tx_ring *tx_ring)
2677{
2678 if (tx_ring->wq_base) {
2679 dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2680 tx_ring->wq_base, tx_ring->wq_base_dma);
2681 tx_ring->wq_base = NULL;
2682 }
2683 kfree(tx_ring->q);
2684 tx_ring->q = NULL;
2685}
2686
2687static int qlge_alloc_tx_resources(struct qlge_adapter *qdev,
2688 struct tx_ring *tx_ring)
2689{
2690 tx_ring->wq_base =
2691 dma_alloc_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2692 &tx_ring->wq_base_dma, GFP_ATOMIC);
2693
2694 if (!tx_ring->wq_base ||
2695 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2696 goto pci_alloc_err;
2697
2698 tx_ring->q =
2699 kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
2700 GFP_KERNEL);
2701 if (!tx_ring->q)
2702 goto err;
2703
2704 return 0;
2705err:
2706 dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2707 tx_ring->wq_base, tx_ring->wq_base_dma);
2708 tx_ring->wq_base = NULL;
2709pci_alloc_err:
2710 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2711 return -ENOMEM;
2712}
2713
2714static void qlge_free_lbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
2715{
2716 struct qlge_bq *lbq = &rx_ring->lbq;
2717 unsigned int last_offset;
2718
2719 last_offset = qlge_lbq_block_size(qdev) - qdev->lbq_buf_size;
2720 while (lbq->next_to_clean != lbq->next_to_use) {
2721 struct qlge_bq_desc *lbq_desc =
2722 &lbq->queue[lbq->next_to_clean];
2723
2724 if (lbq_desc->p.pg_chunk.offset == last_offset)
2725 dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
2726 qlge_lbq_block_size(qdev),
2727 DMA_FROM_DEVICE);
2728 put_page(lbq_desc->p.pg_chunk.page);
2729
2730 lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
2731 }
2732
2733 if (rx_ring->master_chunk.page) {
2734 dma_unmap_page(&qdev->pdev->dev, rx_ring->chunk_dma_addr,
2735 qlge_lbq_block_size(qdev), DMA_FROM_DEVICE);
2736 put_page(rx_ring->master_chunk.page);
2737 rx_ring->master_chunk.page = NULL;
2738 }
2739}
2740
2741static void qlge_free_sbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
2742{
2743 int i;
2744
2745 for (i = 0; i < QLGE_BQ_LEN; i++) {
2746 struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i];
2747
2748 if (!sbq_desc) {
2749 netif_err(qdev, ifup, qdev->ndev,
2750 "sbq_desc %d is NULL.\n", i);
2751 return;
2752 }
2753 if (sbq_desc->p.skb) {
2754 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
2755 SMALL_BUF_MAP_SIZE,
2756 DMA_FROM_DEVICE);
2757 dev_kfree_skb(sbq_desc->p.skb);
2758 sbq_desc->p.skb = NULL;
2759 }
2760 }
2761}
2762
2763
2764
2765
2766static void qlge_free_rx_buffers(struct qlge_adapter *qdev)
2767{
2768 int i;
2769
2770 for (i = 0; i < qdev->rx_ring_count; i++) {
2771 struct rx_ring *rx_ring = &qdev->rx_ring[i];
2772
2773 if (rx_ring->lbq.queue)
2774 qlge_free_lbq_buffers(qdev, rx_ring);
2775 if (rx_ring->sbq.queue)
2776 qlge_free_sbq_buffers(qdev, rx_ring);
2777 }
2778}
2779
2780static void qlge_alloc_rx_buffers(struct qlge_adapter *qdev)
2781{
2782 int i;
2783
2784 for (i = 0; i < qdev->rss_ring_count; i++)
2785 qlge_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL,
2786 HZ / 2);
2787}
2788
2789static int qlge_init_bq(struct qlge_bq *bq)
2790{
2791 struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
2792 struct qlge_adapter *qdev = rx_ring->qdev;
2793 struct qlge_bq_desc *bq_desc;
2794 __le64 *buf_ptr;
2795 int i;
2796
2797 bq->base = dma_alloc_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2798 &bq->base_dma, GFP_ATOMIC);
2799 if (!bq->base) {
2800 netif_err(qdev, ifup, qdev->ndev,
2801 "ring %u %s allocation failed.\n", rx_ring->cq_id,
2802 bq_type_name[bq->type]);
2803 return -ENOMEM;
2804 }
2805
2806 bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
2807 GFP_KERNEL);
2808 if (!bq->queue)
2809 return -ENOMEM;
2810
2811 buf_ptr = bq->base;
2812 bq_desc = &bq->queue[0];
2813 for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) {
2814 bq_desc->p.skb = NULL;
2815 bq_desc->index = i;
2816 bq_desc->buf_ptr = buf_ptr;
2817 }
2818
2819 return 0;
2820}
2821
2822static void qlge_free_rx_resources(struct qlge_adapter *qdev,
2823 struct rx_ring *rx_ring)
2824{
2825
2826 if (rx_ring->sbq.base) {
2827 dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2828 rx_ring->sbq.base, rx_ring->sbq.base_dma);
2829 rx_ring->sbq.base = NULL;
2830 }
2831
2832
2833 kfree(rx_ring->sbq.queue);
2834 rx_ring->sbq.queue = NULL;
2835
2836
2837 if (rx_ring->lbq.base) {
2838 dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2839 rx_ring->lbq.base, rx_ring->lbq.base_dma);
2840 rx_ring->lbq.base = NULL;
2841 }
2842
2843
2844 kfree(rx_ring->lbq.queue);
2845 rx_ring->lbq.queue = NULL;
2846
2847
2848 if (rx_ring->cq_base) {
2849 dma_free_coherent(&qdev->pdev->dev,
2850 rx_ring->cq_size,
2851 rx_ring->cq_base, rx_ring->cq_base_dma);
2852 rx_ring->cq_base = NULL;
2853 }
2854}
2855
2856
2857
2858
2859static int qlge_alloc_rx_resources(struct qlge_adapter *qdev,
2860 struct rx_ring *rx_ring)
2861{
2862
2863
2864
2865 rx_ring->cq_base =
2866 dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size,
2867 &rx_ring->cq_base_dma, GFP_ATOMIC);
2868
2869 if (!rx_ring->cq_base) {
2870 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2871 return -ENOMEM;
2872 }
2873
2874 if (rx_ring->cq_id < qdev->rss_ring_count &&
2875 (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
2876 qlge_free_rx_resources(qdev, rx_ring);
2877 return -ENOMEM;
2878 }
2879
2880 return 0;
2881}
2882
2883static void qlge_tx_ring_clean(struct qlge_adapter *qdev)
2884{
2885 struct tx_ring *tx_ring;
2886 struct tx_ring_desc *tx_ring_desc;
2887 int i, j;
2888
2889
2890
2891
2892
2893 for (j = 0; j < qdev->tx_ring_count; j++) {
2894 tx_ring = &qdev->tx_ring[j];
2895 for (i = 0; i < tx_ring->wq_len; i++) {
2896 tx_ring_desc = &tx_ring->q[i];
2897 if (tx_ring_desc && tx_ring_desc->skb) {
2898 netif_err(qdev, ifdown, qdev->ndev,
2899 "Freeing lost SKB %p, from queue %d, index %d.\n",
2900 tx_ring_desc->skb, j,
2901 tx_ring_desc->index);
2902 qlge_unmap_send(qdev, tx_ring_desc,
2903 tx_ring_desc->map_cnt);
2904 dev_kfree_skb(tx_ring_desc->skb);
2905 tx_ring_desc->skb = NULL;
2906 }
2907 }
2908 }
2909}
2910
2911static void qlge_free_mem_resources(struct qlge_adapter *qdev)
2912{
2913 int i;
2914
2915 for (i = 0; i < qdev->tx_ring_count; i++)
2916 qlge_free_tx_resources(qdev, &qdev->tx_ring[i]);
2917 for (i = 0; i < qdev->rx_ring_count; i++)
2918 qlge_free_rx_resources(qdev, &qdev->rx_ring[i]);
2919 qlge_free_shadow_space(qdev);
2920}
2921
2922static int qlge_alloc_mem_resources(struct qlge_adapter *qdev)
2923{
2924 int i;
2925
2926
2927 if (qlge_alloc_shadow_space(qdev))
2928 return -ENOMEM;
2929
2930 for (i = 0; i < qdev->rx_ring_count; i++) {
2931 if (qlge_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2932 netif_err(qdev, ifup, qdev->ndev,
2933 "RX resource allocation failed.\n");
2934 goto err_mem;
2935 }
2936 }
2937
2938 for (i = 0; i < qdev->tx_ring_count; i++) {
2939 if (qlge_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2940 netif_err(qdev, ifup, qdev->ndev,
2941 "TX resource allocation failed.\n");
2942 goto err_mem;
2943 }
2944 }
2945 return 0;
2946
2947err_mem:
2948 qlge_free_mem_resources(qdev);
2949 return -ENOMEM;
2950}
2951
2952
2953
2954
2955
2956static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
2957{
2958 struct cqicb *cqicb = &rx_ring->cqicb;
2959 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2960 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2961 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2962 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2963 void __iomem *doorbell_area =
2964 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2965 int err = 0;
2966 u64 tmp;
2967 __le64 *base_indirect_ptr;
2968 int page_entries;
2969
2970
2971 rx_ring->prod_idx_sh_reg = shadow_reg;
2972 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2973 *rx_ring->prod_idx_sh_reg = 0;
2974 shadow_reg += sizeof(u64);
2975 shadow_reg_dma += sizeof(u64);
2976 rx_ring->lbq.base_indirect = shadow_reg;
2977 rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
2978 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
2979 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
2980 rx_ring->sbq.base_indirect = shadow_reg;
2981 rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
2982
2983
2984 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *)doorbell_area;
2985 rx_ring->cnsmr_idx = 0;
2986 rx_ring->curr_entry = rx_ring->cq_base;
2987
2988
2989 rx_ring->valid_db_reg = doorbell_area + 0x04;
2990
2991
2992 rx_ring->lbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x18);
2993
2994
2995 rx_ring->sbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x1c);
2996
2997 memset((void *)cqicb, 0, sizeof(struct cqicb));
2998 cqicb->msix_vect = rx_ring->irq;
2999
3000 cqicb->len = cpu_to_le16(QLGE_FIT16(rx_ring->cq_len) | LEN_V |
3001 LEN_CPP_CONT);
3002
3003 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3004
3005 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3006
3007
3008
3009
3010 cqicb->flags = FLAGS_LC |
3011 FLAGS_LV |
3012 FLAGS_LI;
3013 if (rx_ring->cq_id < qdev->rss_ring_count) {
3014 cqicb->flags |= FLAGS_LL;
3015 tmp = (u64)rx_ring->lbq.base_dma;
3016 base_indirect_ptr = rx_ring->lbq.base_indirect;
3017 page_entries = 0;
3018 do {
3019 *base_indirect_ptr = cpu_to_le64(tmp);
3020 tmp += DB_PAGE_SIZE;
3021 base_indirect_ptr++;
3022 page_entries++;
3023 } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3024 cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
3025 cqicb->lbq_buf_size =
3026 cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
3027 cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3028 rx_ring->lbq.next_to_use = 0;
3029 rx_ring->lbq.next_to_clean = 0;
3030
3031 cqicb->flags |= FLAGS_LS;
3032 tmp = (u64)rx_ring->sbq.base_dma;
3033 base_indirect_ptr = rx_ring->sbq.base_indirect;
3034 page_entries = 0;
3035 do {
3036 *base_indirect_ptr = cpu_to_le64(tmp);
3037 tmp += DB_PAGE_SIZE;
3038 base_indirect_ptr++;
3039 page_entries++;
3040 } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3041 cqicb->sbq_addr =
3042 cpu_to_le64(rx_ring->sbq.base_indirect_dma);
3043 cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
3044 cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3045 rx_ring->sbq.next_to_use = 0;
3046 rx_ring->sbq.next_to_clean = 0;
3047 }
3048 if (rx_ring->cq_id < qdev->rss_ring_count) {
3049
3050
3051
3052 netif_napi_add(qdev->ndev, &rx_ring->napi, qlge_napi_poll_msix,
3053 64);
3054 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3055 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3056 } else {
3057 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3058 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3059 }
3060 err = qlge_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3061 CFG_LCQ, rx_ring->cq_id);
3062 if (err) {
3063 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3064 return err;
3065 }
3066 return err;
3067}
3068
3069static int qlge_start_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring)
3070{
3071 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3072 void __iomem *doorbell_area =
3073 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3074 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3075 (tx_ring->wq_id * sizeof(u64));
3076 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3077 (tx_ring->wq_id * sizeof(u64));
3078 int err = 0;
3079
3080
3081
3082
3083
3084 tx_ring->prod_idx_db_reg = (u32 __iomem *)doorbell_area;
3085 tx_ring->prod_idx = 0;
3086
3087 tx_ring->valid_db_reg = doorbell_area + 0x04;
3088
3089
3090
3091
3092 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3093 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3094
3095 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3096 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3097 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3098 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3099 wqicb->rid = 0;
3100 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3101
3102 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3103
3104 qlge_init_tx_ring(qdev, tx_ring);
3105
3106 err = qlge_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3107 (u16)tx_ring->wq_id);
3108 if (err) {
3109 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3110 return err;
3111 }
3112 return err;
3113}
3114
3115static void qlge_disable_msix(struct qlge_adapter *qdev)
3116{
3117 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3118 pci_disable_msix(qdev->pdev);
3119 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3120 kfree(qdev->msi_x_entry);
3121 qdev->msi_x_entry = NULL;
3122 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3123 pci_disable_msi(qdev->pdev);
3124 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3125 }
3126}
3127
3128
3129
3130
3131
3132static void qlge_enable_msix(struct qlge_adapter *qdev)
3133{
3134 int i, err;
3135
3136
3137 if (qlge_irq_type == MSIX_IRQ) {
3138
3139
3140
3141 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3142 sizeof(struct msix_entry),
3143 GFP_KERNEL);
3144 if (!qdev->msi_x_entry) {
3145 qlge_irq_type = MSI_IRQ;
3146 goto msi;
3147 }
3148
3149 for (i = 0; i < qdev->intr_count; i++)
3150 qdev->msi_x_entry[i].entry = i;
3151
3152 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3153 1, qdev->intr_count);
3154 if (err < 0) {
3155 kfree(qdev->msi_x_entry);
3156 qdev->msi_x_entry = NULL;
3157 netif_warn(qdev, ifup, qdev->ndev,
3158 "MSI-X Enable failed, trying MSI.\n");
3159 qlge_irq_type = MSI_IRQ;
3160 } else {
3161 qdev->intr_count = err;
3162 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3163 netif_info(qdev, ifup, qdev->ndev,
3164 "MSI-X Enabled, got %d vectors.\n",
3165 qdev->intr_count);
3166 return;
3167 }
3168 }
3169msi:
3170 qdev->intr_count = 1;
3171 if (qlge_irq_type == MSI_IRQ) {
3172 if (pci_alloc_irq_vectors(qdev->pdev, 1, 1, PCI_IRQ_MSI) >= 0) {
3173 set_bit(QL_MSI_ENABLED, &qdev->flags);
3174 netif_info(qdev, ifup, qdev->ndev,
3175 "Running with MSI interrupts.\n");
3176 return;
3177 }
3178 }
3179 qlge_irq_type = LEG_IRQ;
3180 set_bit(QL_LEGACY_ENABLED, &qdev->flags);
3181 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3182 "Running with legacy interrupts.\n");
3183}
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194static void qlge_set_tx_vect(struct qlge_adapter *qdev)
3195{
3196 int i, j, vect;
3197 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3198
3199 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3200
3201 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3202 i < qdev->rx_ring_count; i++) {
3203 if (j == tx_rings_per_vector) {
3204 vect++;
3205 j = 0;
3206 }
3207 qdev->rx_ring[i].irq = vect;
3208 j++;
3209 }
3210 } else {
3211
3212
3213
3214 for (i = 0; i < qdev->rx_ring_count; i++)
3215 qdev->rx_ring[i].irq = 0;
3216 }
3217}
3218
3219
3220
3221
3222
3223
3224static void qlge_set_irq_mask(struct qlge_adapter *qdev, struct intr_context *ctx)
3225{
3226 int j, vect = ctx->intr;
3227 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3228
3229 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3230
3231
3232
3233 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3234
3235
3236
3237 for (j = 0; j < tx_rings_per_vector; j++) {
3238 ctx->irq_mask |=
3239 (1 << qdev->rx_ring[qdev->rss_ring_count +
3240 (vect * tx_rings_per_vector) + j].cq_id);
3241 }
3242 } else {
3243
3244
3245
3246 for (j = 0; j < qdev->rx_ring_count; j++)
3247 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3248 }
3249}
3250
3251
3252
3253
3254
3255
3256
3257static void qlge_resolve_queues_to_irqs(struct qlge_adapter *qdev)
3258{
3259 int i = 0;
3260 struct intr_context *intr_context = &qdev->intr_context[0];
3261
3262 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3263
3264
3265
3266
3267 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3268 qdev->rx_ring[i].irq = i;
3269 intr_context->intr = i;
3270 intr_context->qdev = qdev;
3271
3272
3273
3274 qlge_set_irq_mask(qdev, intr_context);
3275
3276
3277
3278
3279 intr_context->intr_en_mask =
3280 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3281 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3282 | i;
3283 intr_context->intr_dis_mask =
3284 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3285 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3286 INTR_EN_IHD | i;
3287 intr_context->intr_read_mask =
3288 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3289 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3290 i;
3291 if (i == 0) {
3292
3293
3294
3295
3296
3297 intr_context->handler = qlge_isr;
3298 sprintf(intr_context->name, "%s-rx-%d",
3299 qdev->ndev->name, i);
3300 } else {
3301
3302
3303
3304 intr_context->handler = qlge_msix_rx_isr;
3305 sprintf(intr_context->name, "%s-rx-%d",
3306 qdev->ndev->name, i);
3307 }
3308 }
3309 } else {
3310
3311
3312
3313
3314 intr_context->intr = 0;
3315 intr_context->qdev = qdev;
3316
3317
3318
3319
3320 intr_context->intr_en_mask =
3321 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3322 intr_context->intr_dis_mask =
3323 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3324 INTR_EN_TYPE_DISABLE;
3325 if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) {
3326
3327
3328
3329
3330
3331 intr_context->intr_en_mask |= INTR_EN_EI << 16 |
3332 INTR_EN_EI;
3333 intr_context->intr_dis_mask |= INTR_EN_EI << 16;
3334 }
3335 intr_context->intr_read_mask =
3336 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3337
3338
3339
3340 intr_context->handler = qlge_isr;
3341 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3342
3343
3344
3345
3346
3347 qlge_set_irq_mask(qdev, intr_context);
3348 }
3349
3350
3351
3352 qlge_set_tx_vect(qdev);
3353}
3354
3355static void qlge_free_irq(struct qlge_adapter *qdev)
3356{
3357 int i;
3358 struct intr_context *intr_context = &qdev->intr_context[0];
3359
3360 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3361 if (intr_context->hooked) {
3362 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3363 free_irq(qdev->msi_x_entry[i].vector,
3364 &qdev->rx_ring[i]);
3365 } else {
3366 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3367 }
3368 }
3369 }
3370 qlge_disable_msix(qdev);
3371}
3372
3373static int qlge_request_irq(struct qlge_adapter *qdev)
3374{
3375 int i;
3376 int status = 0;
3377 struct pci_dev *pdev = qdev->pdev;
3378 struct intr_context *intr_context = &qdev->intr_context[0];
3379
3380 qlge_resolve_queues_to_irqs(qdev);
3381
3382 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3383 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3384 status = request_irq(qdev->msi_x_entry[i].vector,
3385 intr_context->handler,
3386 0,
3387 intr_context->name,
3388 &qdev->rx_ring[i]);
3389 if (status) {
3390 netif_err(qdev, ifup, qdev->ndev,
3391 "Failed request for MSIX interrupt %d.\n",
3392 i);
3393 goto err_irq;
3394 }
3395 } else {
3396 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3397 "trying msi or legacy interrupts.\n");
3398 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3399 "%s: irq = %d.\n", __func__, pdev->irq);
3400 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3401 "%s: context->name = %s.\n", __func__,
3402 intr_context->name);
3403 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3404 "%s: dev_id = 0x%p.\n", __func__,
3405 &qdev->rx_ring[0]);
3406 status =
3407 request_irq(pdev->irq, qlge_isr,
3408 test_bit(QL_MSI_ENABLED, &qdev->flags)
3409 ? 0
3410 : IRQF_SHARED,
3411 intr_context->name, &qdev->rx_ring[0]);
3412 if (status)
3413 goto err_irq;
3414
3415 netif_err(qdev, ifup, qdev->ndev,
3416 "Hooked intr 0, queue type RX_Q, with name %s.\n",
3417 intr_context->name);
3418 }
3419 intr_context->hooked = 1;
3420 }
3421 return status;
3422err_irq:
3423 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3424 qlge_free_irq(qdev);
3425 return status;
3426}
3427
3428static int qlge_start_rss(struct qlge_adapter *qdev)
3429{
3430 static const u8 init_hash_seed[] = {
3431 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3432 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3433 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3434 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3435 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3436 };
3437 struct ricb *ricb = &qdev->ricb;
3438 int status = 0;
3439 int i;
3440 u8 *hash_id = (u8 *)ricb->hash_cq_id;
3441
3442 memset((void *)ricb, 0, sizeof(*ricb));
3443
3444 ricb->base_cq = RSS_L4K;
3445 ricb->flags =
3446 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3447 ricb->mask = cpu_to_le16((u16)(0x3ff));
3448
3449
3450
3451
3452 for (i = 0; i < 1024; i++)
3453 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3454
3455 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3456 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3457
3458 status = qlge_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3459 if (status) {
3460 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3461 return status;
3462 }
3463 return status;
3464}
3465
3466static int qlge_clear_routing_entries(struct qlge_adapter *qdev)
3467{
3468 int i, status = 0;
3469
3470 status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3471 if (status)
3472 return status;
3473
3474 for (i = 0; i < 16; i++) {
3475 status = qlge_set_routing_reg(qdev, i, 0, 0);
3476 if (status) {
3477 netif_err(qdev, ifup, qdev->ndev,
3478 "Failed to init routing register for CAM packets.\n");
3479 break;
3480 }
3481 }
3482 qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
3483 return status;
3484}
3485
3486
3487static int qlge_route_initialize(struct qlge_adapter *qdev)
3488{
3489 int status = 0;
3490
3491
3492 status = qlge_clear_routing_entries(qdev);
3493 if (status)
3494 return status;
3495
3496 status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3497 if (status)
3498 return status;
3499
3500 status = qlge_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3501 RT_IDX_IP_CSUM_ERR, 1);
3502 if (status) {
3503 netif_err(qdev, ifup, qdev->ndev,
3504 "Failed to init routing register for IP CSUM error packets.\n");
3505 goto exit;
3506 }
3507 status = qlge_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3508 RT_IDX_TU_CSUM_ERR, 1);
3509 if (status) {
3510 netif_err(qdev, ifup, qdev->ndev,
3511 "Failed to init routing register for TCP/UDP CSUM error packets.\n");
3512 goto exit;
3513 }
3514 status = qlge_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3515 if (status) {
3516 netif_err(qdev, ifup, qdev->ndev,
3517 "Failed to init routing register for broadcast packets.\n");
3518 goto exit;
3519 }
3520
3521
3522
3523 if (qdev->rss_ring_count > 1) {
3524 status = qlge_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3525 RT_IDX_RSS_MATCH, 1);
3526 if (status) {
3527 netif_err(qdev, ifup, qdev->ndev,
3528 "Failed to init routing register for MATCH RSS packets.\n");
3529 goto exit;
3530 }
3531 }
3532
3533 status = qlge_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3534 RT_IDX_CAM_HIT, 1);
3535 if (status)
3536 netif_err(qdev, ifup, qdev->ndev,
3537 "Failed to init routing register for CAM packets.\n");
3538exit:
3539 qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
3540 return status;
3541}
3542
3543int qlge_cam_route_initialize(struct qlge_adapter *qdev)
3544{
3545 int status, set;
3546
3547
3548
3549
3550
3551 set = qlge_read32(qdev, STS);
3552 set &= qdev->port_link_up;
3553 status = qlge_set_mac_addr(qdev, set);
3554 if (status) {
3555 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3556 return status;
3557 }
3558
3559 status = qlge_route_initialize(qdev);
3560 if (status)
3561 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3562
3563 return status;
3564}
3565
3566static int qlge_adapter_initialize(struct qlge_adapter *qdev)
3567{
3568 u32 value, mask;
3569 int i;
3570 int status = 0;
3571
3572
3573
3574
3575 value = SYS_EFE | SYS_FAE;
3576 mask = value << 16;
3577 qlge_write32(qdev, SYS, mask | value);
3578
3579
3580 value = NIC_RCV_CFG_DFQ;
3581 mask = NIC_RCV_CFG_DFQ_MASK;
3582 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3583 value |= NIC_RCV_CFG_RV;
3584 mask |= (NIC_RCV_CFG_RV << 16);
3585 }
3586 qlge_write32(qdev, NIC_RCV_CFG, (mask | value));
3587
3588
3589 qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3590
3591
3592 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3593 FSC_EC | FSC_VM_PAGE_4K;
3594 value |= SPLT_SETTING;
3595
3596
3597 mask = FSC_VM_PAGESIZE_MASK |
3598 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3599 qlge_write32(qdev, FSC, mask | value);
3600
3601 qlge_write32(qdev, SPLT_HDR, SPLT_LEN);
3602
3603
3604
3605
3606
3607
3608 qlge_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3609
3610
3611
3612
3613 value = qlge_read32(qdev, MGMT_RCV_CFG);
3614 value &= ~MGMT_RCV_CFG_RM;
3615 mask = 0xffff0000;
3616
3617
3618 qlge_write32(qdev, MGMT_RCV_CFG, mask);
3619 qlge_write32(qdev, MGMT_RCV_CFG, mask | value);
3620
3621
3622 if (qdev->pdev->subsystem_device == 0x0068 ||
3623 qdev->pdev->subsystem_device == 0x0180)
3624 qdev->wol = WAKE_MAGIC;
3625
3626
3627 for (i = 0; i < qdev->rx_ring_count; i++) {
3628 status = qlge_start_rx_ring(qdev, &qdev->rx_ring[i]);
3629 if (status) {
3630 netif_err(qdev, ifup, qdev->ndev,
3631 "Failed to start rx ring[%d].\n", i);
3632 return status;
3633 }
3634 }
3635
3636
3637
3638
3639 if (qdev->rss_ring_count > 1) {
3640 status = qlge_start_rss(qdev);
3641 if (status) {
3642 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3643 return status;
3644 }
3645 }
3646
3647
3648 for (i = 0; i < qdev->tx_ring_count; i++) {
3649 status = qlge_start_tx_ring(qdev, &qdev->tx_ring[i]);
3650 if (status) {
3651 netif_err(qdev, ifup, qdev->ndev,
3652 "Failed to start tx ring[%d].\n", i);
3653 return status;
3654 }
3655 }
3656
3657
3658 status = qdev->nic_ops->port_initialize(qdev);
3659 if (status)
3660 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3661
3662
3663 status = qlge_cam_route_initialize(qdev);
3664 if (status) {
3665 netif_err(qdev, ifup, qdev->ndev,
3666 "Failed to init CAM/Routing tables.\n");
3667 return status;
3668 }
3669
3670
3671 for (i = 0; i < qdev->rss_ring_count; i++)
3672 napi_enable(&qdev->rx_ring[i].napi);
3673
3674 return status;
3675}
3676
3677
3678static int qlge_adapter_reset(struct qlge_adapter *qdev)
3679{
3680 u32 value;
3681 int status = 0;
3682 unsigned long end_jiffies;
3683
3684
3685 status = qlge_clear_routing_entries(qdev);
3686 if (status) {
3687 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3688 return status;
3689 }
3690
3691
3692
3693
3694 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3695
3696 qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3697
3698
3699 qlge_wait_fifo_empty(qdev);
3700 } else {
3701 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3702 }
3703
3704 qlge_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3705
3706 end_jiffies = jiffies + usecs_to_jiffies(30);
3707 do {
3708 value = qlge_read32(qdev, RST_FO);
3709 if ((value & RST_FO_FR) == 0)
3710 break;
3711 cpu_relax();
3712 } while (time_before(jiffies, end_jiffies));
3713
3714 if (value & RST_FO_FR) {
3715 netif_err(qdev, ifdown, qdev->ndev,
3716 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3717 status = -ETIMEDOUT;
3718 }
3719
3720
3721 qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3722 return status;
3723}
3724
3725static void qlge_display_dev_info(struct net_device *ndev)
3726{
3727 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
3728
3729 netif_info(qdev, probe, qdev->ndev,
3730 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, XG Roll = %d, XG Rev = %d.\n",
3731 qdev->func,
3732 qdev->port,
3733 qdev->chip_rev_id & 0x0000000f,
3734 qdev->chip_rev_id >> 4 & 0x0000000f,
3735 qdev->chip_rev_id >> 8 & 0x0000000f,
3736 qdev->chip_rev_id >> 12 & 0x0000000f);
3737 netif_info(qdev, probe, qdev->ndev,
3738 "MAC address %pM\n", ndev->dev_addr);
3739}
3740
3741static int qlge_wol(struct qlge_adapter *qdev)
3742{
3743 int status = 0;
3744 u32 wol = MB_WOL_DISABLE;
3745
3746
3747
3748
3749
3750
3751
3752
3753 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3754 WAKE_MCAST | WAKE_BCAST)) {
3755 netif_err(qdev, ifdown, qdev->ndev,
3756 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3757 qdev->wol);
3758 return -EINVAL;
3759 }
3760
3761 if (qdev->wol & WAKE_MAGIC) {
3762 status = qlge_mb_wol_set_magic(qdev, 1);
3763 if (status) {
3764 netif_err(qdev, ifdown, qdev->ndev,
3765 "Failed to set magic packet on %s.\n",
3766 qdev->ndev->name);
3767 return status;
3768 }
3769 netif_info(qdev, drv, qdev->ndev,
3770 "Enabled magic packet successfully on %s.\n",
3771 qdev->ndev->name);
3772
3773 wol |= MB_WOL_MAGIC_PKT;
3774 }
3775
3776 if (qdev->wol) {
3777 wol |= MB_WOL_MODE_ON;
3778 status = qlge_mb_wol_mode(qdev, wol);
3779 netif_err(qdev, drv, qdev->ndev,
3780 "WOL %s (wol code 0x%x) on %s\n",
3781 (status == 0) ? "Successfully set" : "Failed",
3782 wol, qdev->ndev->name);
3783 }
3784
3785 return status;
3786}
3787
3788static void qlge_cancel_all_work_sync(struct qlge_adapter *qdev)
3789{
3790
3791
3792
3793 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3794 cancel_delayed_work_sync(&qdev->asic_reset_work);
3795 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3796 cancel_delayed_work_sync(&qdev->mpi_work);
3797 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3798 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3799}
3800
3801static int qlge_adapter_down(struct qlge_adapter *qdev)
3802{
3803 int i, status = 0;
3804
3805 qlge_link_off(qdev);
3806
3807 qlge_cancel_all_work_sync(qdev);
3808
3809 for (i = 0; i < qdev->rss_ring_count; i++)
3810 napi_disable(&qdev->rx_ring[i].napi);
3811
3812 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3813
3814 qlge_disable_interrupts(qdev);
3815
3816 qlge_tx_ring_clean(qdev);
3817
3818
3819 for (i = 0; i < qdev->rss_ring_count; i++)
3820 netif_napi_del(&qdev->rx_ring[i].napi);
3821
3822 status = qlge_adapter_reset(qdev);
3823 if (status)
3824 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3825 qdev->func);
3826 qlge_free_rx_buffers(qdev);
3827
3828 return status;
3829}
3830
3831static int qlge_adapter_up(struct qlge_adapter *qdev)
3832{
3833 int err = 0;
3834
3835 err = qlge_adapter_initialize(qdev);
3836 if (err) {
3837 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3838 goto err_init;
3839 }
3840 set_bit(QL_ADAPTER_UP, &qdev->flags);
3841 qlge_alloc_rx_buffers(qdev);
3842
3843
3844
3845 if ((qlge_read32(qdev, STS) & qdev->port_init) &&
3846 (qlge_read32(qdev, STS) & qdev->port_link_up))
3847 qlge_link_on(qdev);
3848
3849 clear_bit(QL_ALLMULTI, &qdev->flags);
3850 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3851 qlge_set_multicast_list(qdev->ndev);
3852
3853
3854 qlge_restore_vlan(qdev);
3855
3856 qlge_enable_interrupts(qdev);
3857 qlge_enable_all_completion_interrupts(qdev);
3858 netif_tx_start_all_queues(qdev->ndev);
3859
3860 return 0;
3861err_init:
3862 qlge_adapter_reset(qdev);
3863 return err;
3864}
3865
3866static void qlge_release_adapter_resources(struct qlge_adapter *qdev)
3867{
3868 qlge_free_mem_resources(qdev);
3869 qlge_free_irq(qdev);
3870}
3871
3872static int qlge_get_adapter_resources(struct qlge_adapter *qdev)
3873{
3874 if (qlge_alloc_mem_resources(qdev)) {
3875 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3876 return -ENOMEM;
3877 }
3878 return qlge_request_irq(qdev);
3879}
3880
3881static int qlge_close(struct net_device *ndev)
3882{
3883 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
3884 int i;
3885
3886
3887
3888
3889
3890 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3891 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
3892 clear_bit(QL_EEH_FATAL, &qdev->flags);
3893 return 0;
3894 }
3895
3896
3897
3898
3899
3900 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3901 msleep(1);
3902
3903
3904 for (i = 0; i < qdev->rss_ring_count; i++)
3905 cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work);
3906
3907 qlge_adapter_down(qdev);
3908 qlge_release_adapter_resources(qdev);
3909 return 0;
3910}
3911
3912static void qlge_set_lb_size(struct qlge_adapter *qdev)
3913{
3914 if (qdev->ndev->mtu <= 1500)
3915 qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE;
3916 else
3917 qdev->lbq_buf_size = LARGE_BUFFER_MAX_SIZE;
3918 qdev->lbq_buf_order = get_order(qdev->lbq_buf_size);
3919}
3920
3921static int qlge_configure_rings(struct qlge_adapter *qdev)
3922{
3923 int i;
3924 struct rx_ring *rx_ring;
3925 struct tx_ring *tx_ring;
3926 int cpu_cnt = min_t(int, MAX_CPUS, num_online_cpus());
3927
3928
3929
3930
3931
3932
3933
3934
3935 qdev->intr_count = cpu_cnt;
3936 qlge_enable_msix(qdev);
3937
3938 qdev->rss_ring_count = qdev->intr_count;
3939 qdev->tx_ring_count = cpu_cnt;
3940 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
3941
3942 for (i = 0; i < qdev->tx_ring_count; i++) {
3943 tx_ring = &qdev->tx_ring[i];
3944 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3945 tx_ring->qdev = qdev;
3946 tx_ring->wq_id = i;
3947 tx_ring->wq_len = qdev->tx_ring_size;
3948 tx_ring->wq_size =
3949 tx_ring->wq_len * sizeof(struct qlge_ob_mac_iocb_req);
3950
3951
3952
3953
3954
3955 tx_ring->cq_id = qdev->rss_ring_count + i;
3956 }
3957
3958 for (i = 0; i < qdev->rx_ring_count; i++) {
3959 rx_ring = &qdev->rx_ring[i];
3960 memset((void *)rx_ring, 0, sizeof(*rx_ring));
3961 rx_ring->qdev = qdev;
3962 rx_ring->cq_id = i;
3963 rx_ring->cpu = i % cpu_cnt;
3964 if (i < qdev->rss_ring_count) {
3965
3966
3967
3968 rx_ring->cq_len = qdev->rx_ring_size;
3969 rx_ring->cq_size =
3970 rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb);
3971 rx_ring->lbq.type = QLGE_LB;
3972 rx_ring->sbq.type = QLGE_SB;
3973 INIT_DELAYED_WORK(&rx_ring->refill_work,
3974 &qlge_slow_refill);
3975 } else {
3976
3977
3978
3979
3980 rx_ring->cq_len = qdev->tx_ring_size;
3981 rx_ring->cq_size =
3982 rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb);
3983 }
3984 }
3985 return 0;
3986}
3987
3988static int qlge_open(struct net_device *ndev)
3989{
3990 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
3991 int err = 0;
3992
3993 err = qlge_adapter_reset(qdev);
3994 if (err)
3995 return err;
3996
3997 qlge_set_lb_size(qdev);
3998 err = qlge_configure_rings(qdev);
3999 if (err)
4000 return err;
4001
4002 err = qlge_get_adapter_resources(qdev);
4003 if (err)
4004 goto error_up;
4005
4006 err = qlge_adapter_up(qdev);
4007 if (err)
4008 goto error_up;
4009
4010 return err;
4011
4012error_up:
4013 qlge_release_adapter_resources(qdev);
4014 return err;
4015}
4016
4017static int qlge_change_rx_buffers(struct qlge_adapter *qdev)
4018{
4019 int status;
4020
4021
4022 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4023 int i = 4;
4024
4025 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4026 netif_err(qdev, ifup, qdev->ndev,
4027 "Waiting for adapter UP...\n");
4028 ssleep(1);
4029 }
4030
4031 if (!i) {
4032 netif_err(qdev, ifup, qdev->ndev,
4033 "Timed out waiting for adapter UP\n");
4034 return -ETIMEDOUT;
4035 }
4036 }
4037
4038 status = qlge_adapter_down(qdev);
4039 if (status)
4040 goto error;
4041
4042 qlge_set_lb_size(qdev);
4043
4044 status = qlge_adapter_up(qdev);
4045 if (status)
4046 goto error;
4047
4048 return status;
4049error:
4050 netif_alert(qdev, ifup, qdev->ndev,
4051 "Driver up/down cycle failed, closing device.\n");
4052 set_bit(QL_ADAPTER_UP, &qdev->flags);
4053 dev_close(qdev->ndev);
4054 return status;
4055}
4056
4057static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4058{
4059 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4060 int status;
4061
4062 if (ndev->mtu == 1500 && new_mtu == 9000)
4063 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4064 else if (ndev->mtu == 9000 && new_mtu == 1500)
4065 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4066 else
4067 return -EINVAL;
4068
4069 queue_delayed_work(qdev->workqueue,
4070 &qdev->mpi_port_cfg_work, 3 * HZ);
4071
4072 ndev->mtu = new_mtu;
4073
4074 if (!netif_running(qdev->ndev))
4075 return 0;
4076
4077 status = qlge_change_rx_buffers(qdev);
4078 if (status) {
4079 netif_err(qdev, ifup, qdev->ndev,
4080 "Changing MTU failed.\n");
4081 }
4082
4083 return status;
4084}
4085
4086static struct net_device_stats *qlge_get_stats(struct net_device
4087 *ndev)
4088{
4089 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4090 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4091 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4092 unsigned long pkts, mcast, dropped, errors, bytes;
4093 int i;
4094
4095
4096 pkts = mcast = dropped = errors = bytes = 0;
4097 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4098 pkts += rx_ring->rx_packets;
4099 bytes += rx_ring->rx_bytes;
4100 dropped += rx_ring->rx_dropped;
4101 errors += rx_ring->rx_errors;
4102 mcast += rx_ring->rx_multicast;
4103 }
4104 ndev->stats.rx_packets = pkts;
4105 ndev->stats.rx_bytes = bytes;
4106 ndev->stats.rx_dropped = dropped;
4107 ndev->stats.rx_errors = errors;
4108 ndev->stats.multicast = mcast;
4109
4110
4111 pkts = errors = bytes = 0;
4112 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4113 pkts += tx_ring->tx_packets;
4114 bytes += tx_ring->tx_bytes;
4115 errors += tx_ring->tx_errors;
4116 }
4117 ndev->stats.tx_packets = pkts;
4118 ndev->stats.tx_bytes = bytes;
4119 ndev->stats.tx_errors = errors;
4120 return &ndev->stats;
4121}
4122
4123static void qlge_set_multicast_list(struct net_device *ndev)
4124{
4125 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4126 struct netdev_hw_addr *ha;
4127 int i, status;
4128
4129 status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4130 if (status)
4131 return;
4132
4133
4134
4135
4136 if (ndev->flags & IFF_PROMISC) {
4137 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4138 if (qlge_set_routing_reg
4139 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4140 netif_err(qdev, hw, qdev->ndev,
4141 "Failed to set promiscuous mode.\n");
4142 } else {
4143 set_bit(QL_PROMISCUOUS, &qdev->flags);
4144 }
4145 }
4146 } else {
4147 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4148 if (qlge_set_routing_reg
4149 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4150 netif_err(qdev, hw, qdev->ndev,
4151 "Failed to clear promiscuous mode.\n");
4152 } else {
4153 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4154 }
4155 }
4156 }
4157
4158
4159
4160
4161
4162 if ((ndev->flags & IFF_ALLMULTI) ||
4163 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4164 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4165 if (qlge_set_routing_reg
4166 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4167 netif_err(qdev, hw, qdev->ndev,
4168 "Failed to set all-multi mode.\n");
4169 } else {
4170 set_bit(QL_ALLMULTI, &qdev->flags);
4171 }
4172 }
4173 } else {
4174 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4175 if (qlge_set_routing_reg
4176 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4177 netif_err(qdev, hw, qdev->ndev,
4178 "Failed to clear all-multi mode.\n");
4179 } else {
4180 clear_bit(QL_ALLMULTI, &qdev->flags);
4181 }
4182 }
4183 }
4184
4185 if (!netdev_mc_empty(ndev)) {
4186 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4187 if (status)
4188 goto exit;
4189 i = 0;
4190 netdev_for_each_mc_addr(ha, ndev) {
4191 if (qlge_set_mac_addr_reg(qdev, (u8 *)ha->addr,
4192 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4193 netif_err(qdev, hw, qdev->ndev,
4194 "Failed to loadmulticast address.\n");
4195 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4196 goto exit;
4197 }
4198 i++;
4199 }
4200 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4201 if (qlge_set_routing_reg
4202 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4203 netif_err(qdev, hw, qdev->ndev,
4204 "Failed to set multicast match mode.\n");
4205 } else {
4206 set_bit(QL_ALLMULTI, &qdev->flags);
4207 }
4208 }
4209exit:
4210 qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
4211}
4212
4213static int qlge_set_mac_address(struct net_device *ndev, void *p)
4214{
4215 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4216 struct sockaddr *addr = p;
4217 int status;
4218
4219 if (!is_valid_ether_addr(addr->sa_data))
4220 return -EADDRNOTAVAIL;
4221 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4222
4223 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4224
4225 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4226 if (status)
4227 return status;
4228 status = qlge_set_mac_addr_reg(qdev, (u8 *)ndev->dev_addr,
4229 MAC_ADDR_TYPE_CAM_MAC,
4230 qdev->func * MAX_CQ);
4231 if (status)
4232 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4233 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4234 return status;
4235}
4236
4237static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
4238{
4239 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4240
4241 qlge_queue_asic_error(qdev);
4242}
4243
4244static void qlge_asic_reset_work(struct work_struct *work)
4245{
4246 struct qlge_adapter *qdev =
4247 container_of(work, struct qlge_adapter, asic_reset_work.work);
4248 int status;
4249
4250 rtnl_lock();
4251 status = qlge_adapter_down(qdev);
4252 if (status)
4253 goto error;
4254
4255 status = qlge_adapter_up(qdev);
4256 if (status)
4257 goto error;
4258
4259
4260 clear_bit(QL_ALLMULTI, &qdev->flags);
4261 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4262 qlge_set_multicast_list(qdev->ndev);
4263
4264 rtnl_unlock();
4265 return;
4266error:
4267 netif_alert(qdev, ifup, qdev->ndev,
4268 "Driver up/down cycle failed, closing device\n");
4269
4270 set_bit(QL_ADAPTER_UP, &qdev->flags);
4271 dev_close(qdev->ndev);
4272 rtnl_unlock();
4273}
4274
4275static const struct nic_operations qla8012_nic_ops = {
4276 .get_flash = qlge_get_8012_flash_params,
4277 .port_initialize = qlge_8012_port_initialize,
4278};
4279
4280static const struct nic_operations qla8000_nic_ops = {
4281 .get_flash = qlge_get_8000_flash_params,
4282 .port_initialize = qlge_8000_port_initialize,
4283};
4284
4285
4286
4287
4288
4289
4290
4291
4292static int qlge_get_alt_pcie_func(struct qlge_adapter *qdev)
4293{
4294 int status = 0;
4295 u32 temp;
4296 u32 nic_func1, nic_func2;
4297
4298 status = qlge_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4299 &temp);
4300 if (status)
4301 return status;
4302
4303 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4304 MPI_TEST_NIC_FUNC_MASK);
4305 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4306 MPI_TEST_NIC_FUNC_MASK);
4307
4308 if (qdev->func == nic_func1)
4309 qdev->alt_func = nic_func2;
4310 else if (qdev->func == nic_func2)
4311 qdev->alt_func = nic_func1;
4312 else
4313 status = -EIO;
4314
4315 return status;
4316}
4317
4318static int qlge_get_board_info(struct qlge_adapter *qdev)
4319{
4320 int status;
4321
4322 qdev->func =
4323 (qlge_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4324 if (qdev->func > 3)
4325 return -EIO;
4326
4327 status = qlge_get_alt_pcie_func(qdev);
4328 if (status)
4329 return status;
4330
4331 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4332 if (qdev->port) {
4333 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4334 qdev->port_link_up = STS_PL1;
4335 qdev->port_init = STS_PI1;
4336 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4337 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4338 } else {
4339 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4340 qdev->port_link_up = STS_PL0;
4341 qdev->port_init = STS_PI0;
4342 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4343 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4344 }
4345 qdev->chip_rev_id = qlge_read32(qdev, REV_ID);
4346 qdev->device_id = qdev->pdev->device;
4347 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4348 qdev->nic_ops = &qla8012_nic_ops;
4349 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4350 qdev->nic_ops = &qla8000_nic_ops;
4351 return status;
4352}
4353
4354static void qlge_release_all(struct pci_dev *pdev)
4355{
4356 struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4357
4358 if (qdev->workqueue) {
4359 destroy_workqueue(qdev->workqueue);
4360 qdev->workqueue = NULL;
4361 }
4362
4363 if (qdev->reg_base)
4364 iounmap(qdev->reg_base);
4365 if (qdev->doorbell_area)
4366 iounmap(qdev->doorbell_area);
4367 vfree(qdev->mpi_coredump);
4368 pci_release_regions(pdev);
4369}
4370
4371static int qlge_init_device(struct pci_dev *pdev, struct qlge_adapter *qdev,
4372 int cards_found)
4373{
4374 struct net_device *ndev = qdev->ndev;
4375 int err = 0;
4376
4377 err = pci_enable_device(pdev);
4378 if (err) {
4379 dev_err(&pdev->dev, "PCI device enable failed.\n");
4380 return err;
4381 }
4382
4383 qdev->pdev = pdev;
4384 pci_set_drvdata(pdev, qdev);
4385
4386
4387 err = pcie_set_readrq(pdev, 4096);
4388 if (err) {
4389 dev_err(&pdev->dev, "Set readrq failed.\n");
4390 goto err_disable_pci;
4391 }
4392
4393 err = pci_request_regions(pdev, DRV_NAME);
4394 if (err) {
4395 dev_err(&pdev->dev, "PCI region request failed.\n");
4396 goto err_disable_pci;
4397 }
4398
4399 pci_set_master(pdev);
4400 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
4401 set_bit(QL_DMA64, &qdev->flags);
4402 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4403 } else {
4404 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4405 if (!err)
4406 err = dma_set_coherent_mask(&pdev->dev,
4407 DMA_BIT_MASK(32));
4408 }
4409
4410 if (err) {
4411 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4412 goto err_release_pci;
4413 }
4414
4415
4416 pdev->needs_freset = 1;
4417 pci_save_state(pdev);
4418 qdev->reg_base =
4419 ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
4420 if (!qdev->reg_base) {
4421 dev_err(&pdev->dev, "Register mapping failed.\n");
4422 err = -ENOMEM;
4423 goto err_release_pci;
4424 }
4425
4426 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4427 qdev->doorbell_area =
4428 ioremap(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3));
4429 if (!qdev->doorbell_area) {
4430 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4431 err = -ENOMEM;
4432 goto err_iounmap_base;
4433 }
4434
4435 err = qlge_get_board_info(qdev);
4436 if (err) {
4437 dev_err(&pdev->dev, "Register access failed.\n");
4438 err = -EIO;
4439 goto err_iounmap_doorbell;
4440 }
4441 qdev->msg_enable = netif_msg_init(debug, default_msg);
4442 spin_lock_init(&qdev->stats_lock);
4443
4444 if (qlge_mpi_coredump) {
4445 qdev->mpi_coredump =
4446 vmalloc(sizeof(struct qlge_mpi_coredump));
4447 if (!qdev->mpi_coredump) {
4448 err = -ENOMEM;
4449 goto err_iounmap_doorbell;
4450 }
4451 if (qlge_force_coredump)
4452 set_bit(QL_FRC_COREDUMP, &qdev->flags);
4453 }
4454
4455 err = qdev->nic_ops->get_flash(qdev);
4456 if (err) {
4457 dev_err(&pdev->dev, "Invalid FLASH.\n");
4458 goto err_free_mpi_coredump;
4459 }
4460
4461
4462 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4463
4464
4465 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4466 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4467
4468
4469 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4470 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4471 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4472 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4473
4474
4475
4476
4477 qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
4478 ndev->name);
4479 if (!qdev->workqueue) {
4480 err = -ENOMEM;
4481 goto err_free_mpi_coredump;
4482 }
4483
4484 INIT_DELAYED_WORK(&qdev->asic_reset_work, qlge_asic_reset_work);
4485 INIT_DELAYED_WORK(&qdev->mpi_reset_work, qlge_mpi_reset_work);
4486 INIT_DELAYED_WORK(&qdev->mpi_work, qlge_mpi_work);
4487 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, qlge_mpi_port_cfg_work);
4488 INIT_DELAYED_WORK(&qdev->mpi_idc_work, qlge_mpi_idc_work);
4489 init_completion(&qdev->ide_completion);
4490 mutex_init(&qdev->mpi_mutex);
4491
4492 if (!cards_found) {
4493 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4494 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4495 DRV_NAME, DRV_VERSION);
4496 }
4497 return 0;
4498
4499err_free_mpi_coredump:
4500 vfree(qdev->mpi_coredump);
4501err_iounmap_doorbell:
4502 iounmap(qdev->doorbell_area);
4503err_iounmap_base:
4504 iounmap(qdev->reg_base);
4505err_release_pci:
4506 pci_release_regions(pdev);
4507err_disable_pci:
4508 pci_disable_device(pdev);
4509
4510 return err;
4511}
4512
4513static const struct net_device_ops qlge_netdev_ops = {
4514 .ndo_open = qlge_open,
4515 .ndo_stop = qlge_close,
4516 .ndo_start_xmit = qlge_send,
4517 .ndo_change_mtu = qlge_change_mtu,
4518 .ndo_get_stats = qlge_get_stats,
4519 .ndo_set_rx_mode = qlge_set_multicast_list,
4520 .ndo_set_mac_address = qlge_set_mac_address,
4521 .ndo_validate_addr = eth_validate_addr,
4522 .ndo_tx_timeout = qlge_tx_timeout,
4523 .ndo_set_features = qlge_set_features,
4524 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4525 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
4526};
4527
4528static void qlge_timer(struct timer_list *t)
4529{
4530 struct qlge_adapter *qdev = from_timer(qdev, t, timer);
4531 u32 var = 0;
4532
4533 var = qlge_read32(qdev, STS);
4534 if (pci_channel_offline(qdev->pdev)) {
4535 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4536 return;
4537 }
4538
4539 mod_timer(&qdev->timer, jiffies + (5 * HZ));
4540}
4541
4542static const struct devlink_ops qlge_devlink_ops;
4543
4544static int qlge_probe(struct pci_dev *pdev,
4545 const struct pci_device_id *pci_entry)
4546{
4547 struct qlge_netdev_priv *ndev_priv;
4548 struct qlge_adapter *qdev = NULL;
4549 struct net_device *ndev = NULL;
4550 struct devlink *devlink;
4551 static int cards_found;
4552 int err;
4553
4554 devlink = devlink_alloc(&qlge_devlink_ops, sizeof(struct qlge_adapter));
4555 if (!devlink)
4556 return -ENOMEM;
4557
4558 qdev = devlink_priv(devlink);
4559
4560 ndev = alloc_etherdev_mq(sizeof(struct qlge_netdev_priv),
4561 min(MAX_CPUS,
4562 netif_get_num_default_rss_queues()));
4563 if (!ndev) {
4564 err = -ENOMEM;
4565 goto devlink_free;
4566 }
4567
4568 ndev_priv = netdev_priv(ndev);
4569 ndev_priv->qdev = qdev;
4570 ndev_priv->ndev = ndev;
4571 qdev->ndev = ndev;
4572 err = qlge_init_device(pdev, qdev, cards_found);
4573 if (err < 0)
4574 goto netdev_free;
4575
4576 SET_NETDEV_DEV(ndev, &pdev->dev);
4577 ndev->hw_features = NETIF_F_SG |
4578 NETIF_F_IP_CSUM |
4579 NETIF_F_TSO |
4580 NETIF_F_TSO_ECN |
4581 NETIF_F_HW_VLAN_CTAG_TX |
4582 NETIF_F_HW_VLAN_CTAG_RX |
4583 NETIF_F_HW_VLAN_CTAG_FILTER |
4584 NETIF_F_RXCSUM;
4585 ndev->features = ndev->hw_features;
4586 ndev->vlan_features = ndev->hw_features;
4587
4588 ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
4589 NETIF_F_HW_VLAN_CTAG_TX |
4590 NETIF_F_HW_VLAN_CTAG_RX);
4591
4592 if (test_bit(QL_DMA64, &qdev->flags))
4593 ndev->features |= NETIF_F_HIGHDMA;
4594
4595
4596
4597
4598 ndev->tx_queue_len = qdev->tx_ring_size;
4599 ndev->irq = pdev->irq;
4600
4601 ndev->netdev_ops = &qlge_netdev_ops;
4602 ndev->ethtool_ops = &qlge_ethtool_ops;
4603 ndev->watchdog_timeo = 10 * HZ;
4604
4605
4606
4607
4608
4609 ndev->min_mtu = ETH_DATA_LEN;
4610 ndev->max_mtu = 9000;
4611
4612 err = register_netdev(ndev);
4613 if (err) {
4614 dev_err(&pdev->dev, "net device registration failed.\n");
4615 qlge_release_all(pdev);
4616 pci_disable_device(pdev);
4617 goto netdev_free;
4618 }
4619
4620 err = devlink_register(devlink, &pdev->dev);
4621 if (err)
4622 goto netdev_free;
4623
4624 err = qlge_health_create_reporters(qdev);
4625
4626 if (err)
4627 goto devlink_unregister;
4628
4629
4630
4631
4632 timer_setup(&qdev->timer, qlge_timer, TIMER_DEFERRABLE);
4633 mod_timer(&qdev->timer, jiffies + (5 * HZ));
4634 qlge_link_off(qdev);
4635 qlge_display_dev_info(ndev);
4636 atomic_set(&qdev->lb_count, 0);
4637 cards_found++;
4638 return 0;
4639
4640devlink_unregister:
4641 devlink_unregister(devlink);
4642netdev_free:
4643 free_netdev(ndev);
4644devlink_free:
4645 devlink_free(devlink);
4646
4647 return err;
4648}
4649
4650netdev_tx_t qlge_lb_send(struct sk_buff *skb, struct net_device *ndev)
4651{
4652 return qlge_send(skb, ndev);
4653}
4654
4655int qlge_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4656{
4657 return qlge_clean_inbound_rx_ring(rx_ring, budget);
4658}
4659
4660static void qlge_remove(struct pci_dev *pdev)
4661{
4662 struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4663 struct net_device *ndev = qdev->ndev;
4664 struct devlink *devlink = priv_to_devlink(qdev);
4665
4666 del_timer_sync(&qdev->timer);
4667 qlge_cancel_all_work_sync(qdev);
4668 unregister_netdev(ndev);
4669 qlge_release_all(pdev);
4670 pci_disable_device(pdev);
4671 devlink_health_reporter_destroy(qdev->reporter);
4672 devlink_unregister(devlink);
4673 devlink_free(devlink);
4674 free_netdev(ndev);
4675}
4676
4677
4678static void qlge_eeh_close(struct net_device *ndev)
4679{
4680 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4681 int i;
4682
4683 if (netif_carrier_ok(ndev)) {
4684 netif_carrier_off(ndev);
4685 netif_stop_queue(ndev);
4686 }
4687
4688
4689 qlge_cancel_all_work_sync(qdev);
4690
4691 for (i = 0; i < qdev->rss_ring_count; i++)
4692 netif_napi_del(&qdev->rx_ring[i].napi);
4693
4694 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4695 qlge_tx_ring_clean(qdev);
4696 qlge_free_rx_buffers(qdev);
4697 qlge_release_adapter_resources(qdev);
4698}
4699
4700
4701
4702
4703
4704static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4705 pci_channel_state_t state)
4706{
4707 struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4708 struct net_device *ndev = qdev->ndev;
4709
4710 switch (state) {
4711 case pci_channel_io_normal:
4712 return PCI_ERS_RESULT_CAN_RECOVER;
4713 case pci_channel_io_frozen:
4714 netif_device_detach(ndev);
4715 del_timer_sync(&qdev->timer);
4716 if (netif_running(ndev))
4717 qlge_eeh_close(ndev);
4718 pci_disable_device(pdev);
4719 return PCI_ERS_RESULT_NEED_RESET;
4720 case pci_channel_io_perm_failure:
4721 dev_err(&pdev->dev,
4722 "%s: pci_channel_io_perm_failure.\n", __func__);
4723 del_timer_sync(&qdev->timer);
4724 qlge_eeh_close(ndev);
4725 set_bit(QL_EEH_FATAL, &qdev->flags);
4726 return PCI_ERS_RESULT_DISCONNECT;
4727 }
4728
4729
4730 return PCI_ERS_RESULT_NEED_RESET;
4731}
4732
4733
4734
4735
4736
4737
4738
4739static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4740{
4741 struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4742
4743 pdev->error_state = pci_channel_io_normal;
4744
4745 pci_restore_state(pdev);
4746 if (pci_enable_device(pdev)) {
4747 netif_err(qdev, ifup, qdev->ndev,
4748 "Cannot re-enable PCI device after reset.\n");
4749 return PCI_ERS_RESULT_DISCONNECT;
4750 }
4751 pci_set_master(pdev);
4752
4753 if (qlge_adapter_reset(qdev)) {
4754 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4755 set_bit(QL_EEH_FATAL, &qdev->flags);
4756 return PCI_ERS_RESULT_DISCONNECT;
4757 }
4758
4759 return PCI_ERS_RESULT_RECOVERED;
4760}
4761
4762static void qlge_io_resume(struct pci_dev *pdev)
4763{
4764 struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4765 struct net_device *ndev = qdev->ndev;
4766 int err = 0;
4767
4768 if (netif_running(ndev)) {
4769 err = qlge_open(ndev);
4770 if (err) {
4771 netif_err(qdev, ifup, qdev->ndev,
4772 "Device initialization failed after reset.\n");
4773 return;
4774 }
4775 } else {
4776 netif_err(qdev, ifup, qdev->ndev,
4777 "Device was not running prior to EEH.\n");
4778 }
4779 mod_timer(&qdev->timer, jiffies + (5 * HZ));
4780 netif_device_attach(ndev);
4781}
4782
4783static const struct pci_error_handlers qlge_err_handler = {
4784 .error_detected = qlge_io_error_detected,
4785 .slot_reset = qlge_io_slot_reset,
4786 .resume = qlge_io_resume,
4787};
4788
4789static int __maybe_unused qlge_suspend(struct device *dev_d)
4790{
4791 struct pci_dev *pdev = to_pci_dev(dev_d);
4792 struct qlge_adapter *qdev;
4793 struct net_device *ndev;
4794 int err;
4795
4796 qdev = pci_get_drvdata(pdev);
4797 ndev = qdev->ndev;
4798 netif_device_detach(ndev);
4799 del_timer_sync(&qdev->timer);
4800
4801 if (netif_running(ndev)) {
4802 err = qlge_adapter_down(qdev);
4803 if (!err)
4804 return err;
4805 }
4806
4807 qlge_wol(qdev);
4808
4809 return 0;
4810}
4811
4812static int __maybe_unused qlge_resume(struct device *dev_d)
4813{
4814 struct pci_dev *pdev = to_pci_dev(dev_d);
4815 struct qlge_adapter *qdev;
4816 struct net_device *ndev;
4817 int err;
4818
4819 qdev = pci_get_drvdata(pdev);
4820 ndev = qdev->ndev;
4821
4822 pci_set_master(pdev);
4823
4824 device_wakeup_disable(dev_d);
4825
4826 if (netif_running(ndev)) {
4827 err = qlge_adapter_up(qdev);
4828 if (err)
4829 return err;
4830 }
4831
4832 mod_timer(&qdev->timer, jiffies + (5 * HZ));
4833 netif_device_attach(ndev);
4834
4835 return 0;
4836}
4837
4838static void qlge_shutdown(struct pci_dev *pdev)
4839{
4840 qlge_suspend(&pdev->dev);
4841}
4842
4843static SIMPLE_DEV_PM_OPS(qlge_pm_ops, qlge_suspend, qlge_resume);
4844
4845static struct pci_driver qlge_driver = {
4846 .name = DRV_NAME,
4847 .id_table = qlge_pci_tbl,
4848 .probe = qlge_probe,
4849 .remove = qlge_remove,
4850 .driver.pm = &qlge_pm_ops,
4851 .shutdown = qlge_shutdown,
4852 .err_handler = &qlge_err_handler
4853};
4854
4855module_pci_driver(qlge_driver);
4856