#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/ip6_checksum.h>
#include <linux/crc32.h>
#include "alx.h"
#include "hw.h"
#include "reg.h"

const char alx_drv_name[] = "alx";

static bool msix = true;
module_param(msix, bool, 0);
MODULE_PARM_DESC(msix, "Enable msi-x interrupt support (default: true)");

static void alx_free_txbuf(struct alx_tx_queue *txq, int entry)
{
	struct alx_buffer *txb = &txq->bufs[entry];

	if (dma_unmap_len(txb, size)) {
		dma_unmap_single(txq->dev,
				 dma_unmap_addr(txb, dma),
				 dma_unmap_len(txb, size),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(txb, size, 0);
	}

	if (txb->skb) {
		dev_kfree_skb_any(txb->skb);
		txb->skb = NULL;
	}
}

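/* Refill the RX free descriptor (RFD) ring with freshly allocated skbs,
 * starting at write_idx and stopping once the ring is full or an
 * allocation fails.  Returns the number of descriptors that were armed
 * and, if any were, publishes the new producer index to the hardware.
 */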
static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
{
	struct alx_rx_queue *rxq = alx->qnapi[0]->rxq;
	struct sk_buff *skb;
	struct alx_buffer *cur_buf;
	dma_addr_t dma;
	u16 cur, next, count = 0;

	next = cur = rxq->write_idx;
	if (++next == alx->rx_ringsz)
		next = 0;
	cur_buf = &rxq->bufs[cur];

	while (!cur_buf->skb && next != rxq->read_idx) {
		struct alx_rfd *rfd = &rxq->rfd[cur];

		/* Allocate 64 bytes of extra headroom so the buffer start
		 * can be shifted whenever the mapped address would end in
		 * 0x...fc0, an address pattern the hardware does not
		 * handle reliably.
		 */
		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
		if (!skb)
			break;

		if (((unsigned long)skb->data & 0xfff) == 0xfc0)
			skb_reserve(skb, 64);

		dma = dma_map_single(&alx->hw.pdev->dev,
				     skb->data, alx->rxbuf_size,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
			dev_kfree_skb(skb);
			break;
		}

		/* RX buffer addresses must be 4-byte aligned for the
		 * hardware, so IP alignment cannot be used here.
		 */
		if (WARN_ON(dma & 3)) {
			dev_kfree_skb(skb);
			break;
		}

		cur_buf->skb = skb;
		dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
		dma_unmap_addr_set(cur_buf, dma, dma);
		rfd->addr = cpu_to_le64(dma);

		cur = next;
		if (++next == alx->rx_ringsz)
			next = 0;
		cur_buf = &rxq->bufs[cur];
		count++;
	}

	if (count) {
		/* flush all descriptor updates before telling hardware */
		wmb();
		rxq->write_idx = cur;
		alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
	}

	return count;
}

static struct alx_tx_queue *alx_tx_queue_mapping(struct alx_priv *alx,
						 struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= alx->num_txq)
		r_idx = r_idx % alx->num_txq;

	return alx->qnapi[r_idx]->txq;
}

static struct netdev_queue *alx_get_tx_queue(const struct alx_tx_queue *txq)
{
	return netdev_get_tx_queue(txq->netdev, txq->queue_idx);
}

static inline int alx_tpd_avail(struct alx_tx_queue *txq)
{
	if (txq->write_idx >= txq->read_idx)
		return txq->count + txq->read_idx - txq->write_idx - 1;
	return txq->read_idx - txq->write_idx - 1;
}

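/* Reclaim completed TX descriptors up to the consumer index reported by
 * the hardware (bounded by ALX_DEFAULT_TX_WORK), free the associated
 * skbs and DMA mappings, and wake the queue once at least a quarter of
 * the ring is free again.  Returns true when the software read index has
 * caught up with the hardware consumer index.
 */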
static bool alx_clean_tx_irq(struct alx_tx_queue *txq)
{
	struct alx_priv *alx;
	struct netdev_queue *tx_queue;
	u16 hw_read_idx, sw_read_idx;
	unsigned int total_bytes = 0, total_packets = 0;
	int budget = ALX_DEFAULT_TX_WORK;

	alx = netdev_priv(txq->netdev);
	tx_queue = alx_get_tx_queue(txq);

	sw_read_idx = txq->read_idx;
	hw_read_idx = alx_read_mem16(&alx->hw, txq->c_reg);

	if (sw_read_idx != hw_read_idx) {
		while (sw_read_idx != hw_read_idx && budget > 0) {
			struct sk_buff *skb;

			skb = txq->bufs[sw_read_idx].skb;
			if (skb) {
				total_bytes += skb->len;
				total_packets++;
				budget--;
			}

			alx_free_txbuf(txq, sw_read_idx);

			if (++sw_read_idx == txq->count)
				sw_read_idx = 0;
		}
		txq->read_idx = sw_read_idx;

		netdev_tx_completed_queue(tx_queue, total_packets, total_bytes);
	}

	if (netif_tx_queue_stopped(tx_queue) && netif_carrier_ok(alx->dev) &&
	    alx_tpd_avail(txq) > txq->count / 4)
		netif_tx_wake_queue(tx_queue);

	return sw_read_idx == hw_read_idx;
}

static void alx_schedule_link_check(struct alx_priv *alx)
{
	schedule_work(&alx->link_check_wk);
}

static void alx_schedule_reset(struct alx_priv *alx)
{
	schedule_work(&alx->reset_wk);
}

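/* Process up to @budget received packets from the RX return descriptor
 * (RRD) ring: unmap each buffer, drop error frames, set the checksum
 * status when RX checksum offload is enabled, and hand the skb to the
 * stack via GRO.  The RFD ring is refilled periodically while cleaning.
 */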
static int alx_clean_rx_irq(struct alx_rx_queue *rxq, int budget)
{
	struct alx_priv *alx;
	struct alx_rrd *rrd;
	struct alx_buffer *rxb;
	struct sk_buff *skb;
	u16 length, rfd_cleaned = 0;
	int work = 0;

	alx = netdev_priv(rxq->netdev);

	while (work < budget) {
		rrd = &rxq->rrd[rxq->rrd_read_idx];
		if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
			break;
		rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);

		if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_SI) != rxq->read_idx ||
		    ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_NOR) != 1) {
			alx_schedule_reset(alx);
			return work;
		}

		rxb = &rxq->bufs[rxq->read_idx];
		dma_unmap_single(rxq->dev,
				 dma_unmap_addr(rxb, dma),
				 dma_unmap_len(rxb, size),
				 DMA_FROM_DEVICE);
		dma_unmap_len_set(rxb, size, 0);
		skb = rxb->skb;
		rxb->skb = NULL;

		if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
		    rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
			rrd->word3 = 0;
			dev_kfree_skb_any(skb);
			goto next_pkt;
		}

		length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
				       RRD_PKTLEN) - ETH_FCS_LEN;
		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, rxq->netdev);

		skb_checksum_none_assert(skb);
		if (alx->dev->features & NETIF_F_RXCSUM &&
		    !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
				    cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
			switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
					      RRD_PID)) {
			case RRD_PID_IPV6UDP:
			case RRD_PID_IPV4UDP:
			case RRD_PID_IPV4TCP:
			case RRD_PID_IPV6TCP:
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				break;
			}
		}

		napi_gro_receive(&rxq->np->napi, skb);
		work++;

next_pkt:
		if (++rxq->read_idx == rxq->count)
			rxq->read_idx = 0;
		if (++rxq->rrd_read_idx == rxq->count)
			rxq->rrd_read_idx = 0;

		if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
			rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
	}

	if (rfd_cleaned)
		alx_refill_rx_ring(alx, GFP_ATOMIC);

	return work;
}

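/* NAPI poll handler: clean the TX and RX rings for this vector and, once
 * both are fully serviced within the budget, complete NAPI and re-enable
 * the corresponding interrupt (per-vector MSI-X mask or the shared IMR).
 */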
static int alx_poll(struct napi_struct *napi, int budget)
{
	struct alx_napi *np = container_of(napi, struct alx_napi, napi);
	struct alx_priv *alx = np->alx;
	struct alx_hw *hw = &alx->hw;
	unsigned long flags;
	bool tx_complete = true;
	int work = 0;

	if (np->txq)
		tx_complete = alx_clean_tx_irq(np->txq);
	if (np->rxq)
		work = alx_clean_rx_irq(np->rxq, budget);

	if (!tx_complete || work == budget)
		return budget;

	napi_complete(&np->napi);

	/* re-enable the interrupt for this vector */
	if (alx->flags & ALX_FLAG_USING_MSIX) {
		alx_mask_msix(hw, np->vec_idx, false);
	} else {
		spin_lock_irqsave(&alx->irq_lock, flags);
		alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
		spin_unlock_irqrestore(&alx->irq_lock, flags);
	}

	alx_post_write(hw);

	return work;
}

static bool alx_intr_handle_misc(struct alx_priv *alx, u32 intr)
{
	struct alx_hw *hw = &alx->hw;

	if (intr & ALX_ISR_FATAL) {
		netif_warn(alx, hw, alx->dev,
			   "fatal interrupt 0x%x, resetting\n", intr);
		alx_schedule_reset(alx);
		return true;
	}

	if (intr & ALX_ISR_ALERT)
		netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);

	if (intr & ALX_ISR_PHY) {
		/* Mask the PHY interrupt until the link check work has
		 * handled it; alx_check_link() re-enables it once the PHY
		 * status has been read and acknowledged.
		 */
		alx->int_mask &= ~ALX_ISR_PHY;
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
		alx_schedule_link_check(alx);
	}

	return false;
}

static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
{
	struct alx_hw *hw = &alx->hw;

	spin_lock(&alx->irq_lock);

	/* ACK interrupt */
	alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
	intr &= alx->int_mask;

	if (alx_intr_handle_misc(alx, intr))
		goto out;

	if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
		napi_schedule(&alx->qnapi[0]->napi);
		/* mask rx/tx interrupts, re-enabled when NAPI completes */
		alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	}

	alx_write_mem32(hw, ALX_ISR, 0);

 out:
	spin_unlock(&alx->irq_lock);
	return IRQ_HANDLED;
}

static irqreturn_t alx_intr_msix_ring(int irq, void *data)
{
	struct alx_napi *np = data;
	struct alx_hw *hw = &np->alx->hw;

	/* mask interrupt to ACK chip */
	alx_mask_msix(hw, np->vec_idx, true);
	/* clear interrupt status */
	alx_write_mem32(hw, ALX_ISR, np->vec_mask);

	napi_schedule(&np->napi);

	return IRQ_HANDLED;
}

static irqreturn_t alx_intr_msix_misc(int irq, void *data)
{
	struct alx_priv *alx = data;
	struct alx_hw *hw = &alx->hw;
	u32 intr;

	/* mask interrupt to ACK chip */
	alx_mask_msix(hw, 0, true);

	/* read interrupt status */
	intr = alx_read_mem32(hw, ALX_ISR);
	intr &= (alx->int_mask & ~ALX_ISR_ALL_QUEUES);

	if (alx_intr_handle_misc(alx, intr))
		return IRQ_HANDLED;

	/* write back to clear the handled interrupt bits */
	alx_write_mem32(hw, ALX_ISR, intr);

	/* re-enable the misc interrupt vector */
	alx_mask_msix(hw, 0, false);

	return IRQ_HANDLED;
}

static irqreturn_t alx_intr_msi(int irq, void *data)
{
	struct alx_priv *alx = data;

	return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
}

static irqreturn_t alx_intr_legacy(int irq, void *data)
{
	struct alx_priv *alx = data;
	struct alx_hw *hw = &alx->hw;
	u32 intr;

	intr = alx_read_mem32(hw, ALX_ISR);

	if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
		return IRQ_NONE;

	return alx_intr_handle(alx, intr);
}

static const u16 txring_header_reg[] = {ALX_TPD_PRI0_ADDR_LO,
					ALX_TPD_PRI1_ADDR_LO,
					ALX_TPD_PRI2_ADDR_LO,
					ALX_TPD_PRI3_ADDR_LO};

static void alx_init_ring_ptrs(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	u32 addr_hi = ((u64)alx->descmem.dma) >> 32;
	struct alx_napi *np;
	int i;

	for (i = 0; i < alx->num_napi; i++) {
		np = alx->qnapi[i];
		if (np->txq) {
			np->txq->read_idx = 0;
			np->txq->write_idx = 0;
			alx_write_mem32(hw,
					txring_header_reg[np->txq->queue_idx],
					np->txq->tpd_dma);
		}

		if (np->rxq) {
			np->rxq->read_idx = 0;
			np->rxq->write_idx = 0;
			np->rxq->rrd_read_idx = 0;
			alx_write_mem32(hw, ALX_RRD_ADDR_LO, np->rxq->rrd_dma);
			alx_write_mem32(hw, ALX_RFD_ADDR_LO, np->rxq->rfd_dma);
		}
	}

	alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);

	alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);

	/* load the ring pointers into the chip */
	alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
}

static void alx_free_txring_buf(struct alx_tx_queue *txq)
{
	int i;

	if (!txq->bufs)
		return;

	for (i = 0; i < txq->count; i++)
		alx_free_txbuf(txq, i);

	memset(txq->bufs, 0, txq->count * sizeof(struct alx_buffer));
	memset(txq->tpd, 0, txq->count * sizeof(struct alx_txd));
	txq->write_idx = 0;
	txq->read_idx = 0;

	netdev_tx_reset_queue(alx_get_tx_queue(txq));
}

static void alx_free_rxring_buf(struct alx_rx_queue *rxq)
{
	struct alx_buffer *cur_buf;
	u16 i;

	if (!rxq->bufs)
		return;

	for (i = 0; i < rxq->count; i++) {
		cur_buf = rxq->bufs + i;
		if (cur_buf->skb) {
			dma_unmap_single(rxq->dev,
					 dma_unmap_addr(cur_buf, dma),
					 dma_unmap_len(cur_buf, size),
					 DMA_FROM_DEVICE);
			dev_kfree_skb(cur_buf->skb);
			cur_buf->skb = NULL;
			dma_unmap_len_set(cur_buf, size, 0);
			dma_unmap_addr_set(cur_buf, dma, 0);
		}
	}

	rxq->write_idx = 0;
	rxq->read_idx = 0;
	rxq->rrd_read_idx = 0;
}

static void alx_free_buffers(struct alx_priv *alx)
{
	int i;

	for (i = 0; i < alx->num_txq; i++)
		if (alx->qnapi[i] && alx->qnapi[i]->txq)
			alx_free_txring_buf(alx->qnapi[i]->txq);

	if (alx->qnapi[0] && alx->qnapi[0]->rxq)
		alx_free_rxring_buf(alx->qnapi[0]->rxq);
}

static int alx_reinit_rings(struct alx_priv *alx)
{
	alx_free_buffers(alx);

	alx_init_ring_ptrs(alx);

	if (!alx_refill_rx_ring(alx, GFP_KERNEL))
		return -ENOMEM;

	return 0;
}

static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
{
	u32 crc32, bit, reg;

	crc32 = ether_crc(ETH_ALEN, addr);
	reg = (crc32 >> 31) & 0x1;
	bit = (crc32 >> 26) & 0x1F;

	mc_hash[reg] |= BIT(bit);
}

static void __alx_set_rx_mode(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	struct netdev_hw_addr *ha;
	u32 mc_hash[2] = {};

	if (!(netdev->flags & IFF_ALLMULTI)) {
		netdev_for_each_mc_addr(ha, netdev)
			alx_add_mc_addr(hw, ha->addr, mc_hash);

		alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
		alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
	}

	hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
	if (netdev->flags & IFF_PROMISC)
		hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
	if (netdev->flags & IFF_ALLMULTI)
		hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}

static void alx_set_rx_mode(struct net_device *netdev)
{
	__alx_set_rx_mode(netdev);
}

static int alx_set_mac_address(struct net_device *netdev, void *data)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	struct sockaddr *addr = data;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (netdev->addr_assign_type & NET_ADDR_RANDOM)
		netdev->addr_assign_type ^= NET_ADDR_RANDOM;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
	alx_set_macaddr(hw, hw->mac_addr);

	return 0;
}

static int alx_alloc_tx_ring(struct alx_priv *alx, struct alx_tx_queue *txq,
			     int offset)
{
	txq->bufs = kcalloc(txq->count, sizeof(struct alx_buffer), GFP_KERNEL);
	if (!txq->bufs)
		return -ENOMEM;

	txq->tpd = alx->descmem.virt + offset;
	txq->tpd_dma = alx->descmem.dma + offset;
	offset += sizeof(struct alx_txd) * txq->count;

	return offset;
}

static int alx_alloc_rx_ring(struct alx_priv *alx, struct alx_rx_queue *rxq,
			     int offset)
{
	rxq->bufs = kcalloc(rxq->count, sizeof(struct alx_buffer), GFP_KERNEL);
	if (!rxq->bufs)
		return -ENOMEM;

	rxq->rrd = alx->descmem.virt + offset;
	rxq->rrd_dma = alx->descmem.dma + offset;
	offset += sizeof(struct alx_rrd) * rxq->count;

	rxq->rfd = alx->descmem.virt + offset;
	rxq->rfd_dma = alx->descmem.dma + offset;
	offset += sizeof(struct alx_rfd) * rxq->count;

	return offset;
}

static int alx_alloc_rings(struct alx_priv *alx)
{
	int i, offset = 0;

	/* The physical TX/RX descriptor rings are allocated as one
	 * coherent chunk so they share the same upper 32 address bits;
	 * the hardware only has a single high-address register per
	 * direction.
	 */
	alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz *
			    alx->num_txq +
			    sizeof(struct alx_rrd) * alx->rx_ringsz +
			    sizeof(struct alx_rfd) * alx->rx_ringsz;
	alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
						alx->descmem.size,
						&alx->descmem.dma,
						GFP_KERNEL);
	if (!alx->descmem.virt)
		return -ENOMEM;

	/* alignment requirements of the descriptor types */
	BUILD_BUG_ON(sizeof(struct alx_txd) % 8);
	BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);

	for (i = 0; i < alx->num_txq; i++) {
		offset = alx_alloc_tx_ring(alx, alx->qnapi[i]->txq, offset);
		if (offset < 0) {
			netdev_err(alx->dev, "Allocation of tx buffer failed!\n");
			return -ENOMEM;
		}
	}

	offset = alx_alloc_rx_ring(alx, alx->qnapi[0]->rxq, offset);
	if (offset < 0) {
		netdev_err(alx->dev, "Allocation of rx buffer failed!\n");
		return -ENOMEM;
	}

	return 0;
}

static void alx_free_rings(struct alx_priv *alx)
{
	int i;

	alx_free_buffers(alx);

	for (i = 0; i < alx->num_txq; i++)
		if (alx->qnapi[i] && alx->qnapi[i]->txq)
			kfree(alx->qnapi[i]->txq->bufs);

	if (alx->qnapi[0] && alx->qnapi[0]->rxq)
		kfree(alx->qnapi[0]->rxq->bufs);

	if (alx->descmem.virt)
		dma_free_coherent(&alx->hw.pdev->dev,
				  alx->descmem.size,
				  alx->descmem.virt,
				  alx->descmem.dma);
}

static void alx_free_napis(struct alx_priv *alx)
{
	struct alx_napi *np;
	int i;

	for (i = 0; i < alx->num_napi; i++) {
		np = alx->qnapi[i];
		if (!np)
			continue;

		netif_napi_del(&np->napi);
		kfree(np->txq);
		kfree(np->rxq);
		kfree(np);
		alx->qnapi[i] = NULL;
	}
}

static const u16 tx_pidx_reg[] = {ALX_TPD_PRI0_PIDX, ALX_TPD_PRI1_PIDX,
				  ALX_TPD_PRI2_PIDX, ALX_TPD_PRI3_PIDX};
static const u16 tx_cidx_reg[] = {ALX_TPD_PRI0_CIDX, ALX_TPD_PRI1_CIDX,
				  ALX_TPD_PRI2_CIDX, ALX_TPD_PRI3_CIDX};
static const u32 tx_vect_mask[] = {ALX_ISR_TX_Q0, ALX_ISR_TX_Q1,
				   ALX_ISR_TX_Q2, ALX_ISR_TX_Q3};
static const u32 rx_vect_mask[] = {ALX_ISR_RX_Q0, ALX_ISR_RX_Q1,
				   ALX_ISR_RX_Q2, ALX_ISR_RX_Q3,
				   ALX_ISR_RX_Q4, ALX_ISR_RX_Q5,
				   ALX_ISR_RX_Q6, ALX_ISR_RX_Q7};

static int alx_alloc_napis(struct alx_priv *alx)
{
	struct alx_napi *np;
	struct alx_rx_queue *rxq;
	struct alx_tx_queue *txq;
	int i;

	alx->int_mask &= ~ALX_ISR_ALL_QUEUES;

	/* allocate alx_napi structures */
	for (i = 0; i < alx->num_napi; i++) {
		np = kzalloc(sizeof(struct alx_napi), GFP_KERNEL);
		if (!np)
			goto err_out;

		np->alx = alx;
		netif_napi_add(alx->dev, &np->napi, alx_poll, 64);
		alx->qnapi[i] = np;
	}

	/* allocate tx queues */
	for (i = 0; i < alx->num_txq; i++) {
		np = alx->qnapi[i];
		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
		if (!txq)
			goto err_out;

		np->txq = txq;
		txq->p_reg = tx_pidx_reg[i];
		txq->c_reg = tx_cidx_reg[i];
		txq->queue_idx = i;
		txq->count = alx->tx_ringsz;
		txq->netdev = alx->dev;
		txq->dev = &alx->hw.pdev->dev;
		np->vec_mask |= tx_vect_mask[i];
		alx->int_mask |= tx_vect_mask[i];
	}

	/* allocate the single rx queue */
	np = alx->qnapi[0];
	rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
	if (!rxq)
		goto err_out;

	np->rxq = rxq;
	rxq->np = alx->qnapi[0];
	rxq->queue_idx = 0;
	rxq->count = alx->rx_ringsz;
	rxq->netdev = alx->dev;
	rxq->dev = &alx->hw.pdev->dev;
	np->vec_mask |= rx_vect_mask[0];
	alx->int_mask |= rx_vect_mask[0];

	return 0;

err_out:
	netdev_err(alx->dev, "error allocating internal structures\n");
	alx_free_napis(alx);
	return -ENOMEM;
}

static const int txq_vec_mapping_shift[] = {
	0, ALX_MSI_MAP_TBL1_TXQ0_SHIFT,
	0, ALX_MSI_MAP_TBL1_TXQ1_SHIFT,
	1, ALX_MSI_MAP_TBL2_TXQ2_SHIFT,
	1, ALX_MSI_MAP_TBL2_TXQ3_SHIFT,
};

static void alx_config_vector_mapping(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	u32 tbl[2] = {0, 0};
	int i, vector, idx, shift;

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		/* tx queue mappings */
		for (i = 0, vector = 1; i < alx->num_txq; i++, vector++) {
			idx = txq_vec_mapping_shift[i * 2];
			shift = txq_vec_mapping_shift[i * 2 + 1];
			tbl[idx] |= vector << shift;
		}

		/* rx queue mapping */
		tbl[0] |= 1 << ALX_MSI_MAP_TBL1_RXQ0_SHIFT;
	}

	alx_write_mem32(hw, ALX_MSI_MAP_TBL1, tbl[0]);
	alx_write_mem32(hw, ALX_MSI_MAP_TBL2, tbl[1]);
	alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
}

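/* Try to switch to MSI-X: one vector for misc/PHY events plus one vector
 * per queue pair (up to one TX queue per online CPU, a single RX queue).
 * Returns false on failure so the caller can fall back to MSI or legacy
 * interrupts.
 */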
static bool alx_enable_msix(struct alx_priv *alx)
{
	int i, err, num_vec, num_txq, num_rxq;

	num_txq = min_t(int, num_online_cpus(), ALX_MAX_TX_QUEUES);
	num_rxq = 1;
	num_vec = max_t(int, num_txq, num_rxq) + 1;

	alx->msix_entries = kcalloc(num_vec, sizeof(struct msix_entry),
				    GFP_KERNEL);
	if (!alx->msix_entries) {
		netdev_warn(alx->dev, "Allocation of msix entries failed!\n");
		return false;
	}

	for (i = 0; i < num_vec; i++)
		alx->msix_entries[i].entry = i;

	err = pci_enable_msix(alx->hw.pdev, alx->msix_entries, num_vec);
	if (err) {
		kfree(alx->msix_entries);
		netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n");
		return false;
	}

	alx->num_vec = num_vec;
	alx->num_napi = num_vec - 1;
	alx->num_txq = num_txq;
	alx->num_rxq = num_rxq;

	return true;
}

static int alx_request_msix(struct alx_priv *alx)
{
	struct net_device *netdev = alx->dev;
	int i, err, vector = 0, free_vector = 0;

	err = request_irq(alx->msix_entries[0].vector, alx_intr_msix_misc,
			  0, netdev->name, alx);
	if (err)
		goto out_err;

	for (i = 0; i < alx->num_napi; i++) {
		struct alx_napi *np = alx->qnapi[i];

		vector++;

		if (np->txq && np->rxq)
			sprintf(np->irq_lbl, "%s-TxRx-%u", netdev->name,
				np->txq->queue_idx);
		else if (np->txq)
			sprintf(np->irq_lbl, "%s-tx-%u", netdev->name,
				np->txq->queue_idx);
		else if (np->rxq)
			sprintf(np->irq_lbl, "%s-rx-%u", netdev->name,
				np->rxq->queue_idx);
		else
			sprintf(np->irq_lbl, "%s-unused", netdev->name);

		np->vec_idx = vector;
		err = request_irq(alx->msix_entries[vector].vector,
				  alx_intr_msix_ring, 0, np->irq_lbl, np);
		if (err)
			goto out_free;
	}
	return 0;

out_free:
	free_irq(alx->msix_entries[free_vector++].vector, alx);

	vector--;
	for (i = 0; i < vector; i++)
		free_irq(alx->msix_entries[free_vector++].vector,
			 alx->qnapi[i]);

out_err:
	return err;
}

static void alx_init_intr(struct alx_priv *alx, bool msix)
{
	if (msix) {
		if (alx_enable_msix(alx))
			alx->flags |= ALX_FLAG_USING_MSIX;
	}

	if (!(alx->flags & ALX_FLAG_USING_MSIX)) {
		alx->num_vec = 1;
		alx->num_napi = 1;
		alx->num_txq = 1;
		alx->num_rxq = 1;

		if (!pci_enable_msi(alx->hw.pdev))
			alx->flags |= ALX_FLAG_USING_MSI;
	}
}

static void alx_disable_advanced_intr(struct alx_priv *alx)
{
	if (alx->flags & ALX_FLAG_USING_MSIX) {
		kfree(alx->msix_entries);
		pci_disable_msix(alx->hw.pdev);
		alx->flags &= ~ALX_FLAG_USING_MSIX;
	}

	if (alx->flags & ALX_FLAG_USING_MSI) {
		pci_disable_msi(alx->hw.pdev);
		alx->flags &= ~ALX_FLAG_USING_MSI;
	}
}

static void alx_irq_enable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int i;

	/* level-1 interrupt switch */
	alx_write_mem32(hw, ALX_ISR, 0);
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	alx_post_write(hw);

	if (alx->flags & ALX_FLAG_USING_MSIX)
		/* enable all MSI-X vectors */
		for (i = 0; i < alx->num_vec; i++)
			alx_mask_msix(hw, i, false);
}

static void alx_irq_disable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int i;

	alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
	alx_write_mem32(hw, ALX_IMR, 0);
	alx_post_write(hw);

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		for (i = 0; i < alx->num_vec; i++) {
			alx_mask_msix(hw, i, true);
			synchronize_irq(alx->msix_entries[i].vector);
		}
	} else {
		synchronize_irq(alx->hw.pdev->irq);
	}
}

static int alx_realloc_resources(struct alx_priv *alx)
{
	int err;

	alx_free_rings(alx);
	alx_free_napis(alx);
	alx_disable_advanced_intr(alx);
	alx_init_intr(alx, false);

	err = alx_alloc_napis(alx);
	if (err)
		return err;

	err = alx_alloc_rings(alx);
	if (err)
		return err;

	return 0;
}

static int alx_request_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;
	u32 msi_ctrl;

	msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, msi_ctrl);
		err = alx_request_msix(alx);
		if (!err)
			goto out;

		/* MSI-X request failed, fall back and realloc resources */
		err = alx_realloc_resources(alx);
		if (err)
			goto out;
	}

	if (alx->flags & ALX_FLAG_USING_MSI) {
		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
				msi_ctrl | ALX_MSI_MASK_SEL_LINE);
		err = request_irq(pdev->irq, alx_intr_msi, 0,
				  alx->dev->name, alx);
		if (!err)
			goto out;

		alx->flags &= ~ALX_FLAG_USING_MSI;
		pci_disable_msi(alx->hw.pdev);
	}

	alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
	err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
			  alx->dev->name, alx);
out:
	if (!err)
		alx_config_vector_mapping(alx);
	else
		netdev_err(alx->dev, "IRQ registration failed!\n");
	return err;
}

static void alx_free_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	int i, vector = 0;

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		free_irq(alx->msix_entries[vector++].vector, alx);
		for (i = 0; i < alx->num_napi; i++)
			free_irq(alx->msix_entries[vector++].vector,
				 alx->qnapi[i]);
	} else {
		free_irq(pdev->irq, alx);
	}

	alx_disable_advanced_intr(alx);
}

static int alx_identify_hw(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int rev = alx_hw_revision(hw);

	if (rev > ALX_REV_C0)
		return -EINVAL;

	hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;

	return 0;
}

static int alx_init_sw(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;

	err = alx_identify_hw(alx);
	if (err) {
		dev_err(&pdev->dev, "unrecognized chip, aborting\n");
		return err;
	}

	alx->hw.lnk_patch =
		pdev->device == ALX_DEV_ID_AR8161 &&
		pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
		pdev->subsystem_device == 0x0091 &&
		pdev->revision == 0;

	hw->smb_timer = 400;
	hw->mtu = alx->dev->mtu;
	alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
	alx->tx_ringsz = 256;
	alx->rx_ringsz = 512;
	hw->imt = 200;
	alx->int_mask = ALX_ISR_MISC;
	hw->dma_chnl = hw->max_dma_chnl;
	hw->ith_tpd = alx->tx_ringsz / 3;
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;
	hw->adv_cfg = ADVERTISED_Autoneg |
		      ADVERTISED_10baseT_Half |
		      ADVERTISED_10baseT_Full |
		      ADVERTISED_100baseT_Full |
		      ADVERTISED_100baseT_Half |
		      ADVERTISED_1000baseT_Full;
	hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;

	hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
		      ALX_MAC_CTRL_MHASH_ALG_HI5B |
		      ALX_MAC_CTRL_BRD_EN |
		      ALX_MAC_CTRL_PCRCE |
		      ALX_MAC_CTRL_CRCE |
		      ALX_MAC_CTRL_RXFC_EN |
		      ALX_MAC_CTRL_TXFC_EN |
		      7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;

	return err;
}

static netdev_features_t alx_fix_features(struct net_device *netdev,
					  netdev_features_t features)
{
	if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6);

	return features;
}

static void alx_netif_stop(struct alx_priv *alx)
{
	int i;

	netif_trans_update(alx->dev);
	if (netif_carrier_ok(alx->dev)) {
		netif_carrier_off(alx->dev);
		netif_tx_disable(alx->dev);
		for (i = 0; i < alx->num_napi; i++)
			napi_disable(&alx->qnapi[i]->napi);
	}
}

static void alx_halt(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_netif_stop(alx);
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;

	alx_reset_mac(hw);

	/* disable L0s/L1 ASPM while the link is down */
	alx_enable_aspm(hw, false, false);
	alx_irq_disable(alx);
	alx_free_buffers(alx);
}

static void alx_configure(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_configure_basic(hw);
	alx_disable_rss(hw);
	__alx_set_rx_mode(alx->dev);

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}

static void alx_activate(struct alx_priv *alx)
{
	/* hardware settings were lost, restore them */
	alx_reinit_rings(alx);
	alx_configure(alx);

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	alx_schedule_link_check(alx);
}

static void alx_reinit(struct alx_priv *alx)
{
	ASSERT_RTNL();

	alx_halt(alx);
	alx_activate(alx);
}

static int alx_change_mtu(struct net_device *netdev, int mtu)
{
	struct alx_priv *alx = netdev_priv(netdev);
	int max_frame = ALX_MAX_FRAME_LEN(mtu);

	if ((max_frame < ALX_MIN_FRAME_SIZE) ||
	    (max_frame > ALX_MAX_FRAME_SIZE))
		return -EINVAL;

	if (netdev->mtu == mtu)
		return 0;

	netdev->mtu = mtu;
	alx->hw.mtu = mtu;
	alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
	netdev_update_features(netdev);
	if (netif_running(netdev))
		alx_reinit(alx);
	return 0;
}

static void alx_netif_start(struct alx_priv *alx)
{
	int i;

	netif_tx_wake_all_queues(alx->dev);
	for (i = 0; i < alx->num_napi; i++)
		napi_enable(&alx->qnapi[i]->napi);
	netif_carrier_on(alx->dev);
}

static int __alx_open(struct alx_priv *alx, bool resume)
{
	int err;

	alx_init_intr(alx, msix);

	if (!resume)
		netif_carrier_off(alx->dev);

	err = alx_alloc_napis(alx);
	if (err)
		goto out_disable_adv_intr;

	err = alx_alloc_rings(alx);
	if (err)
		goto out_free_rings;

	alx_configure(alx);

	err = alx_request_irq(alx);
	if (err)
		goto out_free_rings;

	/* Must be called after alx_request_irq(): when the MSI-X request
	 * fails the rings are reallocated there, and loading the ring
	 * pointers into the chip twice makes it stop working.
	 */
	alx_reinit_rings(alx);

	netif_set_real_num_tx_queues(alx->dev, alx->num_txq);
	netif_set_real_num_rx_queues(alx->dev, alx->num_rxq);

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	if (!resume)
		netif_tx_start_all_queues(alx->dev);

	alx_schedule_link_check(alx);
	return 0;

out_free_rings:
	alx_free_rings(alx);
	alx_free_napis(alx);
out_disable_adv_intr:
	alx_disable_advanced_intr(alx);
	return err;
}

static void __alx_stop(struct alx_priv *alx)
{
	alx_halt(alx);
	alx_free_irq(alx);
	alx_free_rings(alx);
	alx_free_napis(alx);
}

static const char *alx_speed_desc(struct alx_hw *hw)
{
	switch (alx_speed_to_ethadv(hw->link_speed, hw->duplex)) {
	case ADVERTISED_1000baseT_Full:
		return "1 Gbps Full";
	case ADVERTISED_100baseT_Full:
		return "100 Mbps Full";
	case ADVERTISED_100baseT_Half:
		return "100 Mbps Half";
	case ADVERTISED_10baseT_Full:
		return "10 Mbps Full";
	case ADVERTISED_10baseT_Half:
		return "10 Mbps Half";
	default:
		return "Unknown speed";
	}
}

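/* Link state work: re-read the PHY link status, re-enable the PHY
 * interrupt that the ISR masked, and either start the MAC and netif
 * queues on link-up or stop, reset and reconfigure the MAC on link-down.
 * Any failure along the way schedules a full reset.
 */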
static void alx_check_link(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	unsigned long flags;
	int old_speed;
	u8 old_duplex;
	int err;

	/* clear the PHY internal interrupt status, otherwise the main
	 * interrupt status stays asserted
	 */
	alx_clear_phy_intr(hw);

	old_speed = hw->link_speed;
	old_duplex = hw->duplex;
	err = alx_read_phy_link(hw);
	if (err < 0)
		goto reset;

	spin_lock_irqsave(&alx->irq_lock, flags);
	alx->int_mask |= ALX_ISR_PHY;
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	spin_unlock_irqrestore(&alx->irq_lock, flags);

	if (old_speed == hw->link_speed)
		return;

	if (hw->link_speed != SPEED_UNKNOWN) {
		netif_info(alx, link, alx->dev,
			   "NIC Up: %s\n", alx_speed_desc(hw));
		alx_post_phy_link(hw);
		alx_enable_aspm(hw, true, true);
		alx_start_mac(hw);

		if (old_speed == SPEED_UNKNOWN)
			alx_netif_start(alx);
	} else {
		/* link is now down */
		alx_netif_stop(alx);
		netif_info(alx, link, alx->dev, "Link Down\n");
		err = alx_reset_mac(hw);
		if (err)
			goto reset;
		alx_irq_disable(alx);

		/* MAC reset causes all HW settings to be lost, restore all */
		err = alx_reinit_rings(alx);
		if (err)
			goto reset;
		alx_configure(alx);
		alx_enable_aspm(hw, false, true);
		alx_post_phy_link(hw);
		alx_irq_enable(alx);
	}

	return;

reset:
	alx_schedule_reset(alx);
}

static int alx_open(struct net_device *netdev)
{
	return __alx_open(netdev_priv(netdev), false);
}

static int alx_stop(struct net_device *netdev)
{
	__alx_stop(netdev_priv(netdev));
	return 0;
}

static void alx_link_check(struct work_struct *work)
{
	struct alx_priv *alx;

	alx = container_of(work, struct alx_priv, link_check_wk);

	rtnl_lock();
	alx_check_link(alx);
	rtnl_unlock();
}

static void alx_reset(struct work_struct *work)
{
	struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);

	rtnl_lock();
	alx_reinit(alx);
	rtnl_unlock();
}

static int alx_tpd_req(struct sk_buff *skb)
{
	int num;

	num = skb_shinfo(skb)->nr_frags + 1;
	/* an LSOv2 (IPv6 TSO) packet needs one extra descriptor */
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		num++;

	return num;
}

static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
{
	u8 cso, css;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	cso = skb_checksum_start_offset(skb);
	if (cso & 1)
		return -EINVAL;

	css = cso + skb->csum_offset;
	first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
	first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
	first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);

	return 0;
}

static int alx_tso(struct sk_buff *skb, struct alx_txd *first)
{
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 0, IPPROTO_TCP, 0);
		first->word1 |= 1 << TPD_IPV4_SHIFT;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
		/* LSOv2: the first TPD only carries the total packet length */
		first->adrl.l.pkt_len = skb->len;
		first->word1 |= 1 << TPD_LSO_V2_SHIFT;
	}

	first->word1 |= 1 << TPD_LSO_EN_SHIFT;
	first->word1 |= (skb_transport_offset(skb) &
			 TPD_L4HDROFFSET_MASK) << TPD_L4HDROFFSET_SHIFT;
	first->word1 |= (skb_shinfo(skb)->gso_size &
			 TPD_MSS_MASK) << TPD_MSS_SHIFT;
	return 1;
}

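/* Map the skb head and all fragments for DMA and fill consecutive TX
 * descriptors (TPDs); an LSOv2 packet consumes one extra leading TPD.
 * On a mapping error, every descriptor written so far is unwound.
 */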
static int alx_map_tx_skb(struct alx_tx_queue *txq, struct sk_buff *skb)
{
	struct alx_txd *tpd, *first_tpd;
	dma_addr_t dma;
	int maplen, f, first_idx = txq->write_idx;

	first_tpd = &txq->tpd[txq->write_idx];
	tpd = first_tpd;

	if (tpd->word1 & (1 << TPD_LSO_V2_SHIFT)) {
		if (++txq->write_idx == txq->count)
			txq->write_idx = 0;

		tpd = &txq->tpd[txq->write_idx];
		tpd->len = first_tpd->len;
		tpd->vlan_tag = first_tpd->vlan_tag;
		tpd->word1 = first_tpd->word1;
	}

	maplen = skb_headlen(skb);
	dma = dma_map_single(txq->dev, skb->data, maplen,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(txq->dev, dma))
		goto err_dma;

	dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
	dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

	tpd->adrl.addr = cpu_to_le64(dma);
	tpd->len = cpu_to_le16(maplen);

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];

		if (++txq->write_idx == txq->count)
			txq->write_idx = 0;
		tpd = &txq->tpd[txq->write_idx];

		tpd->word1 = first_tpd->word1;

		maplen = skb_frag_size(frag);
		dma = skb_frag_dma_map(txq->dev, frag, 0,
				       maplen, DMA_TO_DEVICE);
		if (dma_mapping_error(txq->dev, dma))
			goto err_dma;
		dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
		dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

		tpd->adrl.addr = cpu_to_le64(dma);
		tpd->len = cpu_to_le16(maplen);
	}

	/* last TPD, set EOP flag and store skb */
	tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
	txq->bufs[txq->write_idx].skb = skb;

	if (++txq->write_idx == txq->count)
		txq->write_idx = 0;

	return 0;

err_dma:
	f = first_idx;
	while (f != txq->write_idx) {
		alx_free_txbuf(txq, f);
		if (++f == txq->count)
			f = 0;
	}
	return -ENOMEM;
}

static netdev_tx_t alx_start_xmit_ring(struct sk_buff *skb,
				       struct alx_tx_queue *txq)
{
	struct alx_priv *alx;
	struct alx_txd *first;
	int tso;

	alx = netdev_priv(txq->netdev);

	if (alx_tpd_avail(txq) < alx_tpd_req(skb)) {
		netif_tx_stop_queue(alx_get_tx_queue(txq));
		goto drop;
	}

	first = &txq->tpd[txq->write_idx];
	memset(first, 0, sizeof(*first));

	tso = alx_tso(skb, first);
	if (tso < 0)
		goto drop;
	else if (!tso && alx_tx_csum(skb, first))
		goto drop;

	if (alx_map_tx_skb(txq, skb) < 0)
		goto drop;

	netdev_tx_sent_queue(alx_get_tx_queue(txq), skb->len);

	/* flush descriptor updates before updating the producer index */
	wmb();
	alx_write_mem16(&alx->hw, txq->p_reg, txq->write_idx);

	if (alx_tpd_avail(txq) < txq->count / 8)
		netif_tx_stop_queue(alx_get_tx_queue(txq));

	return NETDEV_TX_OK;

drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);

	return alx_start_xmit_ring(skb, alx_tx_queue_mapping(alx, skb));
}

static void alx_tx_timeout(struct net_device *dev)
{
	struct alx_priv *alx = netdev_priv(dev);

	alx_schedule_reset(alx);
}

static int alx_mdio_read(struct net_device *netdev,
			 int prtad, int devad, u16 addr)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	u16 val;
	int err;

	if (prtad != hw->mdio.prtad)
		return -EINVAL;

	if (devad == MDIO_DEVAD_NONE)
		err = alx_read_phy_reg(hw, addr, &val);
	else
		err = alx_read_phy_ext(hw, devad, addr, &val);

	if (err)
		return err;
	return val;
}

static int alx_mdio_write(struct net_device *netdev,
			  int prtad, int devad, u16 addr, u16 val)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;

	if (prtad != hw->mdio.prtad)
		return -EINVAL;

	if (devad == MDIO_DEVAD_NONE)
		return alx_write_phy_reg(hw, addr, val);

	return alx_write_phy_ext(hw, devad, addr, val);
}

static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct alx_priv *alx = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EAGAIN;

	return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void alx_poll_controller(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	int i;

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		alx_intr_msix_misc(0, alx);
		for (i = 0; i < alx->num_txq; i++)
			alx_intr_msix_ring(0, alx->qnapi[i]);
	} else if (alx->flags & ALX_FLAG_USING_MSI)
		alx_intr_msi(0, alx);
	else
		alx_intr_legacy(0, alx);
}
#endif

static void alx_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *net_stats)
{
	struct alx_priv *alx = netdev_priv(dev);
	struct alx_hw_stats *hw_stats = &alx->hw.stats;

	spin_lock(&alx->stats_lock);

	alx_update_hw_stats(&alx->hw);

	net_stats->tx_bytes = hw_stats->tx_byte_cnt;
	net_stats->rx_bytes = hw_stats->rx_byte_cnt;
	net_stats->multicast = hw_stats->rx_mcast;
	net_stats->collisions = hw_stats->tx_single_col +
				hw_stats->tx_multi_col +
				hw_stats->tx_late_col +
				hw_stats->tx_abort_col;

	net_stats->rx_errors = hw_stats->rx_frag +
			       hw_stats->rx_fcs_err +
			       hw_stats->rx_len_err +
			       hw_stats->rx_ov_sz +
			       hw_stats->rx_ov_rrd +
			       hw_stats->rx_align_err +
			       hw_stats->rx_ov_rxf;

	net_stats->rx_fifo_errors = hw_stats->rx_ov_rxf;
	net_stats->rx_length_errors = hw_stats->rx_len_err;
	net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
	net_stats->rx_frame_errors = hw_stats->rx_align_err;
	net_stats->rx_dropped = hw_stats->rx_ov_rrd;

	net_stats->tx_errors = hw_stats->tx_late_col +
			       hw_stats->tx_abort_col +
			       hw_stats->tx_underrun +
			       hw_stats->tx_trunc;

	net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
	net_stats->tx_fifo_errors = hw_stats->tx_underrun;
	net_stats->tx_window_errors = hw_stats->tx_late_col;

	net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
	net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;

	spin_unlock(&alx->stats_lock);
}

static const struct net_device_ops alx_netdev_ops = {
	.ndo_open               = alx_open,
	.ndo_stop               = alx_stop,
	.ndo_start_xmit         = alx_start_xmit,
	.ndo_get_stats64        = alx_get_stats64,
	.ndo_set_rx_mode        = alx_set_rx_mode,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = alx_set_mac_address,
	.ndo_change_mtu_rh74    = alx_change_mtu,
	.ndo_do_ioctl           = alx_ioctl,
	.ndo_tx_timeout         = alx_tx_timeout,
	.ndo_fix_features       = alx_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = alx_poll_controller,
#endif
};

static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct alx_priv *alx;
	struct alx_hw *hw;
	bool phy_configured;
	int bars, err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* Prefer 64-bit DMA; if no 64-bit mask can be set, fall back to
	 * 32-bit DMA before giving up.
	 */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA config, aborting\n");
			goto out_pci_disable;
		}
	}

	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	err = pci_request_selected_regions(pdev, bars, alx_drv_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed(bars:%d)\n", bars);
		goto out_pci_disable;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	if (!pdev->pm_cap) {
		dev_err(&pdev->dev,
			"Can't find power management capability, aborting\n");
		err = -EIO;
		goto out_pci_release;
	}

	netdev = alloc_etherdev_mqs(sizeof(*alx),
				    ALX_MAX_TX_QUEUES, 1);
	if (!netdev) {
		err = -ENOMEM;
		goto out_pci_release;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	alx = netdev_priv(netdev);
	spin_lock_init(&alx->hw.mdio_lock);
	spin_lock_init(&alx->irq_lock);
	spin_lock_init(&alx->stats_lock);
	alx->dev = netdev;
	alx->hw.pdev = pdev;
	alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
			  NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
	hw = &alx->hw;
	pci_set_drvdata(pdev, alx);

	hw->hw_addr = pci_ioremap_bar(pdev, 0);
	if (!hw->hw_addr) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -EIO;
		goto out_free_netdev;
	}

	netdev->netdev_ops = &alx_netdev_ops;
	netdev->ethtool_ops = &alx_ethtool_ops;
	netdev->irq = pdev->irq;
	netdev->watchdog_timeo = ALX_WATCHDOG_TIME;

	if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
		pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;

	err = alx_init_sw(alx);
	if (err) {
		dev_err(&pdev->dev, "net device private data init failed\n");
		goto out_unmap;
	}

	alx_reset_pcie(hw);

	phy_configured = alx_phy_configured(hw);

	if (!phy_configured)
		alx_reset_phy(hw);

	err = alx_reset_mac(hw);
	if (err) {
		dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
		goto out_unmap;
	}

	/* setup link to put it in a known good starting state */
	if (!phy_configured) {
		err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
		if (err) {
			dev_err(&pdev->dev,
				"failed to configure PHY speed/duplex (err=%d)\n",
				err);
			goto out_unmap;
		}
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_HW_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6;

	if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
		dev_warn(&pdev->dev,
			 "Invalid permanent address programmed, using random one\n");
		eth_hw_addr_random(netdev);
		memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
	}

	memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
	memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
	memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);

	hw->mdio.prtad = 0;
	hw->mdio.mmds = 0;
	hw->mdio.dev = netdev;
	hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
				MDIO_SUPPORTS_C22 |
				MDIO_EMULATE_C22;
	hw->mdio.mdio_read = alx_mdio_read;
	hw->mdio.mdio_write = alx_mdio_write;

	if (!alx_get_phy_info(hw)) {
		dev_err(&pdev->dev, "failed to identify PHY\n");
		err = -EIO;
		goto out_unmap;
	}

	INIT_WORK(&alx->link_check_wk, alx_link_check);
	INIT_WORK(&alx->reset_wk, alx_reset);
	netif_carrier_off(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "register netdevice failed\n");
		goto out_unmap;
	}

	netdev_info(netdev,
		    "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
		    netdev->dev_addr);

	return 0;

out_unmap:
	iounmap(hw->hw_addr);
out_free_netdev:
	free_netdev(netdev);
out_pci_release:
	pci_release_selected_regions(pdev, bars);
out_pci_disable:
	pci_disable_device(pdev);
	return err;
}

static void alx_remove(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;

	cancel_work_sync(&alx->link_check_wk);
	cancel_work_sync(&alx->reset_wk);

	/* restore permanent mac address */
	alx_set_macaddr(hw, hw->perm_addr);

	unregister_netdev(alx->dev);
	iounmap(hw->hw_addr);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);

	free_netdev(alx->dev);
}

#ifdef CONFIG_PM_SLEEP
static int alx_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct alx_priv *alx = pci_get_drvdata(pdev);

	if (!netif_running(alx->dev))
		return 0;
	netif_device_detach(alx->dev);
	__alx_stop(alx);
	return 0;
}

static int alx_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;

	alx_reset_phy(hw);

	if (!netif_running(alx->dev))
		return 0;
	netif_device_attach(alx->dev);
	return __alx_open(alx, true);
}

static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
#define ALX_PM_OPS	(&alx_pm_ops)
#else
#define ALX_PM_OPS	NULL
#endif

static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;
	pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;

	dev_info(&pdev->dev, "pci error detected\n");

	rtnl_lock();

	if (netif_running(netdev)) {
		netif_device_detach(netdev);
		alx_halt(alx);
	}

	if (state == pci_channel_io_perm_failure)
		rc = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return rc;
}

static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	dev_info(&pdev->dev, "pci error slot reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
		goto out;
	}

	pci_set_master(pdev);

	alx_reset_pcie(hw);
	if (!alx_reset_mac(hw))
		rc = PCI_ERS_RESULT_RECOVERED;
out:
	pci_cleanup_aer_uncorrect_error_status(pdev);

	rtnl_unlock();

	return rc;
}

static void alx_pci_error_resume(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;

	dev_info(&pdev->dev, "pci error resume\n");

	rtnl_lock();

	if (netif_running(netdev)) {
		alx_activate(alx);
		netif_device_attach(netdev);
	}

	rtnl_unlock();
}

static const struct pci_error_handlers alx_err_handlers = {
	.error_detected = alx_pci_error_detected,
	.slot_reset     = alx_pci_error_slot_reset,
	.resume         = alx_pci_error_resume,
};

static const struct pci_device_id alx_pci_tbl[] = {
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
	{}
};
MODULE_DEVICE_TABLE(pci, alx_pci_tbl);

static struct pci_driver alx_driver = {
	.name        = alx_drv_name,
	.id_table    = alx_pci_tbl,
	.probe       = alx_probe,
	.remove      = alx_remove,
	.err_handler = &alx_err_handlers,
	.driver.pm   = ALX_PM_OPS,
};

module_pci_driver(alx_driver);
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
MODULE_DESCRIPTION(
	"Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
MODULE_LICENSE("GPL");