/*
 * pch_gbe_main.c
 * Gigabit Ethernet driver for the Intel PCH and
 * OKI/LAPIS SEMICONDUCTOR ML7223/ML7831 IOH GbE controllers.
 */
#include "pch_gbe.h"
#include "pch_gbe_api.h"
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>

#define DRV_VERSION "1.01"
const char pch_driver_version[] = DRV_VERSION;

#define PCI_DEVICE_ID_INTEL_IOH1_GBE	0x8802
#define PCH_GBE_MAR_ENTRIES		16
#define PCH_GBE_SHORT_PKT		64
#define DSC_INIT16			0xC000
#define PCH_GBE_DMA_ALIGN		0
#define PCH_GBE_DMA_PADDING		2
#define PCH_GBE_WATCHDOG_PERIOD		(5 * HZ)	/* watchdog time */
#define PCH_GBE_COPYBREAK_DEFAULT	256
#define PCH_GBE_PCI_BAR			1
#define PCH_GBE_RESERVE_MEMORY		0x200000	/* 2 MB */

/* Macros for ML7223 */
#define PCI_VENDOR_ID_ROHM		0x10db
#define PCI_DEVICE_ID_ROHM_ML7223_GBE	0x8013

/* Macros for ML7831 */
#define PCI_DEVICE_ID_ROHM_ML7831_GBE	0x8802

#define PCH_GBE_TX_WEIGHT	64
#define PCH_GBE_RX_WEIGHT	64
#define PCH_GBE_RX_BUFFER_WRITE	16

/* Initialize the wake-on-LAN settings */
#define PCH_GBE_WL_INIT_SETTING	(PCH_GBE_WLC_MP)

#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
	PCH_GBE_CHIP_TYPE_INTERNAL | \
	PCH_GBE_RGMII_MODE_RGMII \
	)

/* Receive buffer and frame sizes */
#define PCH_GBE_MAX_RX_BUFFER_SIZE	0x2880
#define PCH_GBE_MAX_JUMBO_FRAME_SIZE	10318
#define PCH_GBE_FRAME_SIZE_2048		2048
#define PCH_GBE_FRAME_SIZE_4096		4096
#define PCH_GBE_FRAME_SIZE_8192		8192

#define PCH_GBE_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
#define PCH_GBE_RX_DESC(R, i)		PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
#define PCH_GBE_TX_DESC(R, i)		PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
#define PCH_GBE_DESC_UNUSED(R) \
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	(R)->next_to_clean - (R)->next_to_use - 1)

/* Pause packet value */
#define	PCH_GBE_PAUSE_PKT1_VALUE	0x00C28001
#define	PCH_GBE_PAUSE_PKT2_VALUE	0x00000100
#define	PCH_GBE_PAUSE_PKT4_VALUE	0x01000888
#define	PCH_GBE_PAUSE_PKT5_VALUE	0x0000FFFF

/* This defines the bits that are set in the Interrupt Mask Register.
 * Each bit enables the corresponding interrupt source.
 */
#define PCH_GBE_INT_ENABLE_MASK ( \
	PCH_GBE_INT_RX_DMA_CMPLT | \
	PCH_GBE_INT_RX_DSC_EMP | \
	PCH_GBE_INT_RX_FIFO_ERR | \
	PCH_GBE_INT_WOL_DET | \
	PCH_GBE_INT_TX_CMPLT \
	)

#define PCH_GBE_INT_DISABLE_ALL	0

/* Macros for IEEE 1588 (PTP) hardware clock control */
#define MASTER_MODE	(1<<0)
#define SLAVE_MODE	(0)
#define V2_MODE		(1<<31)
#define CAP_MODE0	(0)
#define CAP_MODE2	(1<<17)

/* Bits in the channel event register for latched timestamp snapshots */
#define TX_SNAPSHOT_LOCKED	(1<<0)
#define RX_SNAPSHOT_LOCKED	(1<<1)

#define PTP_L4_MULTICAST_SA	"01:00:5e:00:01:81"
#define PTP_L2_MULTICAST_SA	"01:1b:19:00:00:00"

static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;

static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
			       int data);
static void pch_gbe_set_multi(struct net_device *netdev);

static struct sock_filter ptp_filter[] = {
	PTP_FILTER
};

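/* Match an incoming PTP event message against the source UUID and sequence
 * id latched by the IEEE 1588 hardware for the current RX snapshot.
 */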
static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
	u8 *data = skb->data;
	unsigned int offset;
	u16 *hi, *id;
	u32 lo;

	if (sk_run_filter(skb, ptp_filter) == PTP_CLASS_NONE)
		return 0;

	offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;

	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
		return 0;

	hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
	id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

	memcpy(&lo, &hi[1], sizeof(lo));

	return (uid_hi == *hi &&
		uid_lo == lo &&
		seqid == *id);
}

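/* Attach the hardware RX timestamp to @skb if the snapshot latched in the
 * IEEE 1588 unit matches this packet, then re-arm the RX snapshot.
 */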
static void
pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct pci_dev *pdev;
	u64 ns;
	u32 hi, lo, val;
	u16 uid, seq;

	if (!adapter->hwts_rx_en)
		return;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	val = pch_ch_event_read(pdev);

	if (!(val & RX_SNAPSHOT_LOCKED))
		return;

	lo = pch_src_uuid_lo_read(pdev);
	hi = pch_src_uuid_hi_read(pdev);

	uid = hi & 0xffff;
	seq = (hi >> 16) & 0xffff;

	if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
		goto out;

	ns = pch_rx_snap_read(pdev);

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
	pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
}

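/* Report the hardware TX timestamp for @skb via skb_tstamp_tx() once the
 * IEEE 1588 unit latches the transmit snapshot; gives up after ~100us.
 */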
static void
pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct pci_dev *pdev;
	struct skb_shared_info *shtx;
	u64 ns;
	u32 cnt, val;

	shtx = skb_shinfo(skb);
	if (likely(!(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)))
		return;

	shtx->tx_flags |= SKBTX_IN_PROGRESS;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	/* Poll for the TX timestamp snapshot to be latched by the hardware */
	for (cnt = 0; cnt < 100; cnt++) {
		val = pch_ch_event_read(pdev);
		if (val & TX_SNAPSHOT_LOCKED)
			break;
		udelay(1);
	}
	if (!(val & TX_SNAPSHOT_LOCKED)) {
		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
		return;
	}

	ns = pch_tx_snap_read(pdev);

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);

	pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED);
}

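/* SIOCSHWTSTAMP handler: program the IEEE 1588 unit according to the
 * requested hwtstamp_config and record the TX/RX timestamp enables.
 */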
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config cfg;
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev;
	u8 station[20];

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	switch (cfg.tx_type) {
	case HWTSTAMP_TX_OFF:
		adapter->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		adapter->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		adapter->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		adapter->hwts_rx_en = 0;
		pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
		strcpy(station, PTP_L4_MULTICAST_SA);
		pch_set_station_address(station, pdev);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
		strcpy(station, PTP_L2_MULTICAST_SA);
		pch_set_station_address(station, pdev);
		break;
	default:
		return -ERANGE;
	}

	/* Clear out any old time stamps. */
	pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
{
	iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
}

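/**
 * pch_gbe_mac_read_mac_addr - Read MAC address
 * @hw:	Pointer to the HW structure
 * Returns:
 *	0:	Successful.
 */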
s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
{
	u32 adr1a, adr1b;

	adr1a = ioread32(&hw->reg->mac_adr[0].high);
	adr1b = ioread32(&hw->reg->mac_adr[0].low);

	hw->mac.addr[0] = (u8)(adr1a & 0xFF);
	hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
	hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
	hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
	hw->mac.addr[4] = (u8)(adr1b & 0xFF);
	hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);

	pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
	return 0;
}

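/**
 * pch_gbe_wait_clr_bit - Wait to clear a bit
 * @reg:	Pointer of register
 * @bit:	Busy bit
 */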
static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
{
	u32 tmp;

	/* wait busy */
	tmp = 1000;
	while ((ioread32(reg) & bit) && --tmp)
		cpu_relax();
	if (!tmp)
		pr_err("Error: busy bit is not cleared\n");
}

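/**
 * pch_gbe_mac_mar_set - Set MAC address register
 * @hw:	    Pointer to the HW structure
 * @addr:   Pointer to the MAC address
 * @index:  MAC address array register
 */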
static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index)
{
	u32 mar_low, mar_high, adrmask;

	pr_debug("index : 0x%x\n", index);

	/*
	 * HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
	/* Stop the MAC Address of index. */
	adrmask = ioread32(&hw->reg->ADDR_MASK);
	iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
	/* Set the MAC address to the MAC address register */
	iowrite32(mar_high, &hw->reg->mac_adr[index].high);
	iowrite32(mar_low, &hw->reg->mac_adr[index].low);
	/* Start the MAC address of index */
	iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}

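/**
 * pch_gbe_mac_reset_hw - Reset hardware
 * @hw:	Pointer to the HW structure
 */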
static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
{
	/* Read the MAC address and store it in the private data */
	pch_gbe_mac_read_mac_addr(hw);
	iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
#ifdef PCH_GBE_MAC_IFOP_RGMII
	iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
#endif
	pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
	/* Setup the receive address */
	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
	return;
}

static void pch_gbe_disable_mac_rx(struct pch_gbe_hw *hw)
{
	u32 rctl;
	/* Disables Receive MAC */
	rctl = ioread32(&hw->reg->MAC_RX_EN);
	iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
}

static void pch_gbe_enable_mac_rx(struct pch_gbe_hw *hw)
{
	u32 rctl;
	/* Enables Receive MAC */
	rctl = ioread32(&hw->reg->MAC_RX_EN);
	iowrite32((rctl | PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
}

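/**
 * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
 * @hw:	Pointer to the HW structure
 * @mar_count: Receive address registers
 */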
static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
{
	u32 i;

	/* Setup the receive address */
	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);

	/* Zero out the other receive addresses */
	for (i = 1; i < mar_count; i++) {
		iowrite32(0, &hw->reg->mac_adr[i].high);
		iowrite32(0, &hw->reg->mac_adr[i].low);
	}
	iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}

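/**
 * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
 * @hw:	            Pointer to the HW structure
 * @mc_addr_list:   Array of multicast addresses to program into
 * @mc_addr_count:  Number of multicast addresses to program
 * @mar_used_count: The first MAC Address register free to program
 * @mar_total_num:  Total number of supported MAC Address Registers
 */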
static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
					    u8 *mc_addr_list, u32 mc_addr_count,
					    u32 mar_used_count, u32 mar_total_num)
{
	u32 i, adrmask;

	/* Load the first set of multicast addresses into the exact filters.
	 * If there are not enough addresses to fill the remaining registers,
	 * clear them instead.
	 */
	for (i = mar_used_count; i < mar_total_num; i++) {
		if (mc_addr_count) {
			pch_gbe_mac_mar_set(hw, mc_addr_list, i);
			mc_addr_count--;
			mc_addr_list += ETH_ALEN;
		} else {
			/* Clear MAC address mask */
			adrmask = ioread32(&hw->reg->ADDR_MASK);
			iowrite32((adrmask | (0x0001 << i)),
				  &hw->reg->ADDR_MASK);
			/* wait busy */
			pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
			/* Clear MAC address */
			iowrite32(0, &hw->reg->mac_adr[i].high);
			iowrite32(0, &hw->reg->mac_adr[i].low);
		}
	}
}

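/**
 * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
 * @hw:	Pointer to the HW structure
 * Returns:
 *	0:		Successful.
 *	Negative value:	Failed.
 */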
s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
{
	struct pch_gbe_mac_info *mac = &hw->mac;
	u32 rx_fctrl;

	pr_debug("mac->fc = %u\n", mac->fc);

	rx_fctrl = ioread32(&hw->reg->RX_FCTRL);

	switch (mac->fc) {
	case PCH_GBE_FC_NONE:
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = false;
		break;
	case PCH_GBE_FC_RX_PAUSE:
		rx_fctrl |= PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = false;
		break;
	case PCH_GBE_FC_TX_PAUSE:
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = true;
		break;
	case PCH_GBE_FC_FULL:
		rx_fctrl |= PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = true;
		break;
	default:
		pr_err("Flow control param set incorrectly\n");
		return -EINVAL;
	}
	if (mac->link_duplex == DUPLEX_HALF)
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
	iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
	pr_debug("RX_FCTRL reg : 0x%08x  mac->tx_fc_enable : %d\n",
		 ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
	return 0;
}

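/**
 * pch_gbe_mac_set_wol_event - Set wake-on-lan event
 * @hw:     Pointer to the HW structure
 * @wu_evt: Wake up event
 */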
static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
{
	u32 addr_mask;

	pr_debug("wu_evt : 0x%08x  ADDR_MASK reg : 0x%08x\n",
		 wu_evt, ioread32(&hw->reg->ADDR_MASK));

	if (wu_evt) {
		/* Set Wake-On-Lan address mask */
		addr_mask = ioread32(&hw->reg->ADDR_MASK);
		iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
		/* wait busy */
		pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
		iowrite32(0, &hw->reg->WOL_ST);
		iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
		iowrite32(0x02, &hw->reg->TCPIP_ACC);
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	} else {
		iowrite32(0, &hw->reg->WOL_CTRL);
		iowrite32(0, &hw->reg->WOL_ST);
	}
	return;
}

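/**
 * pch_gbe_mac_ctrl_miim - Control MIIM interface
 * @hw:   Pointer to the HW structure
 * @addr: Address of PHY
 * @dir:  Operation (write or read)
 * @reg:  Access register of PHY
 * @data: Write data
 *
 * Returns: Read data.
 */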
u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
			  u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&hw->miim_lock, flags);

	for (i = 100; i; --i) {
		if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
			break;
		udelay(20);
	}
	if (i == 0) {
		pr_err("pch-gbe.miim won't go Ready\n");
		spin_unlock_irqrestore(&hw->miim_lock, flags);
		return 0;	/* No way to indicate timeout error */
	}
	iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
		  (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
		  dir | data), &hw->reg->MIIM);
	for (i = 0; i < 100; i++) {
		udelay(20);
		data_out = ioread32(&hw->reg->MIIM);
		if ((data_out & PCH_GBE_MIIM_OPER_READY))
			break;
	}
	spin_unlock_irqrestore(&hw->miim_lock, flags);

	pr_debug("PHY %s: reg=%d, data=0x%04X\n",
		 dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
		 dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
	return (u16) data_out;
}

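/**
 * pch_gbe_mac_set_pause_packet - Set pause packet
 * @hw:   Pointer to the HW structure
 */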
static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
{
	unsigned long tmp2, tmp3;

	/* Set Pause packet */
	tmp2 = hw->mac.addr[1];
	tmp2 = (tmp2 << 8) | hw->mac.addr[0];
	tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);

	tmp3 = hw->mac.addr[5];
	tmp3 = (tmp3 << 8) | hw->mac.addr[4];
	tmp3 = (tmp3 << 8) | hw->mac.addr[3];
	tmp3 = (tmp3 << 8) | hw->mac.addr[2];

	iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
	iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
	iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
	iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
	iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);

	/* Transmit Pause Packet */
	iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);

	pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
		 ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
		 ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
		 ioread32(&hw->reg->PAUSE_PKT5));

	return;
}

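/**
 * pch_gbe_alloc_queues - Allocate memory for all rings
 * @adapter:  Board private structure to initialize
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */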
static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
{
	adapter->tx_ring = kzalloc(sizeof(*adapter->tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kzalloc(sizeof(*adapter->rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}
	return 0;
}

/**
 * pch_gbe_init_stats - Initialize status
 * @adapter:  Board private structure to initialize
 */
static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
{
	memset(&adapter->stats, 0, sizeof(adapter->stats));
	return;
}

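/**
 * pch_gbe_init_phy - Initialize PHY
 * @adapter:  Board private structure to initialize
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */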
static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 addr;
	u16 bmcr, stat;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
		adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
		stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
		stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	adapter->hw.phy.addr = adapter->mii.phy_id;
	pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
	if (addr == 32)
		return -EAGAIN;
	/* Selected the phy and isolate the rest */
	for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
		if (addr != adapter->mii.phy_id) {
			pch_gbe_mdio_write(netdev, addr, MII_BMCR,
					   BMCR_ISOLATE);
		} else {
			bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
			pch_gbe_mdio_write(netdev, addr, MII_BMCR,
					   bmcr & ~BMCR_ISOLATE);
		}
	}

	/* MII setup */
	adapter->mii.phy_id_mask = 0x1F;
	adapter->mii.reg_num_mask = 0x1F;
	adapter->mii.dev = adapter->netdev;
	adapter->mii.mdio_read = pch_gbe_mdio_read;
	adapter->mii.mdio_write = pch_gbe_mdio_write;
	adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
	return 0;
}

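/**
 * pch_gbe_mdio_read - The read function for mii
 * @netdev: Network interface device structure
 * @addr:   Phy ID
 * @reg:    Access location
 * Returns: The value read from the MII register
 */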
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
				     (u16) 0);
}

/**
 * pch_gbe_mdio_write - The write function for mii
 * @netdev: Network interface device structure
 * @addr:   Phy ID
 * @reg:    Access location
 * @data:   Write data
 */
static void pch_gbe_mdio_write(struct net_device *netdev,
			       int addr, int reg, int data)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
}

/**
 * pch_gbe_reset_task - Reset processing at the time of transmission timeout
 * @work:  Pointer of board private structure
 */
static void pch_gbe_reset_task(struct work_struct *work)
{
	struct pch_gbe_adapter *adapter;
	adapter = container_of(work, struct pch_gbe_adapter, reset_task);

	rtnl_lock();
	pch_gbe_reinit_locked(adapter);
	rtnl_unlock();
}

/**
 * pch_gbe_reinit_locked - Re-initialization
 * @adapter:  Board private structure
 */
void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
{
	pch_gbe_down(adapter);
	pch_gbe_up(adapter);
}

/**
 * pch_gbe_reset - Reset GbE
 * @adapter:  Board private structure
 */
void pch_gbe_reset(struct pch_gbe_adapter *adapter)
{
	pch_gbe_mac_reset_hw(&adapter->hw);
	/* reprogram multicast address register after reset */
	pch_gbe_set_multi(adapter->netdev);
	/* Setup the receive address. */
	pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
	if (pch_gbe_hal_init_hw(&adapter->hw))
		pr_err("Hardware Error\n");
}

/**
 * pch_gbe_free_irq - Free an interrupt
 * @adapter:  Board private structure
 */
static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
	if (adapter->have_msi) {
		pci_disable_msi(adapter->pdev);
		pr_debug("call pci_disable_msi\n");
	}
}

/**
 * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter:  Board private structure
 */
static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	atomic_inc(&adapter->irq_sem);
	iowrite32(0, &hw->reg->INT_EN);
	ioread32(&hw->reg->INT_ST);
	synchronize_irq(adapter->pdev->irq);

	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
}

/**
 * pch_gbe_irq_enable - Enable default interrupt generation settings
 * @adapter:  Board private structure
 */
static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	if (likely(atomic_dec_and_test(&adapter->irq_sem)))
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	ioread32(&hw->reg->INT_ST);
	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
}

/**
 * pch_gbe_setup_tctl - configure the Transmit control registers
 * @adapter:  Board private structure
 */
static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 tx_mode, tcpip;

	tx_mode = PCH_GBE_TM_LONG_PKT |
		PCH_GBE_TM_ST_AND_FD |
		PCH_GBE_TM_SHORT_PKT |
		PCH_GBE_TM_TH_TX_STRT_8 |
		PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;

	iowrite32(tx_mode, &hw->reg->TX_MODE);

	tcpip = ioread32(&hw->reg->TCPIP_ACC);
	tcpip |= PCH_GBE_TX_TCPIPACC_EN;
	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
	return;
}

/**
 * pch_gbe_configure_tx - Configure Transmit Unit after Reset
 * @adapter:  Board private structure
 */
static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 tdba, tdlen, dctrl;

	pr_debug("dma addr = 0x%08llx  size = 0x%08x\n",
		 (unsigned long long)adapter->tx_ring->dma,
		 adapter->tx_ring->size);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	tdba = adapter->tx_ring->dma;
	tdlen = adapter->tx_ring->size - 0x10;
	iowrite32(tdba, &hw->reg->TX_DSC_BASE);
	iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
	iowrite32(tdba, &hw->reg->TX_DSC_SW_P);

	/* Enables Transmission DMA */
	dctrl = ioread32(&hw->reg->DMA_CTRL);
	dctrl |= PCH_GBE_TX_DMA_EN;
	iowrite32(dctrl, &hw->reg->DMA_CTRL);
}

/**
 * pch_gbe_setup_rctl - Configure the receive control registers
 * @adapter:  Board private structure
 */
static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rx_mode, tcpip;

	rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
	PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;

	iowrite32(rx_mode, &hw->reg->RX_MODE);

	tcpip = ioread32(&hw->reg->TCPIP_ACC);

	tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
	tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
	return;
}

/**
 * pch_gbe_configure_rx - Configure Receive Unit after Reset
 * @adapter:  Board private structure
 */
static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rdba, rdlen, rxdma;

	pr_debug("dma adr = 0x%08llx  size = 0x%08x\n",
		 (unsigned long long)adapter->rx_ring->dma,
		 adapter->rx_ring->size);

	pch_gbe_mac_force_mac_fc(hw);

	pch_gbe_disable_mac_rx(hw);

	/* Disables Receive DMA */
	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma &= ~PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);

	pr_debug("MAC_RX_EN reg = 0x%08x  DMA_CTRL reg = 0x%08x\n",
		 ioread32(&hw->reg->MAC_RX_EN),
		 ioread32(&hw->reg->DMA_CTRL));

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	rdba = adapter->rx_ring->dma;
	rdlen = adapter->rx_ring->size - 0x10;
	iowrite32(rdba, &hw->reg->RX_DSC_BASE);
	iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
	iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
}

/**
 * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
 * @adapter:     Board private structure
 * @buffer_info: Buffer information structure
 */
static void pch_gbe_unmap_and_free_tx_resource(
	struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
{
	if (buffer_info->mapped) {
		dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->mapped = false;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}

/**
 * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
 * @adapter:      Board private structure
 * @buffer_info:  Buffer information structure
 */
static void pch_gbe_unmap_and_free_rx_resource(
					struct pch_gbe_adapter *adapter,
					struct pch_gbe_buffer *buffer_info)
{
	if (buffer_info->mapped) {
		dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->mapped = false;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}

/**
 * pch_gbe_clean_tx_ring - Free Tx Buffers
 * @adapter:  Board private structure
 * @tx_ring:  Ring to be cleaned
 */
static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
				   struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
	}
	pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);

	size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
	iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
}

/**
 * pch_gbe_clean_rx_ring - Free Rx Buffers
 * @adapter:  Board private structure
 * @rx_ring:  Ring to free buffers from
 */
998static void
999pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
1000 struct pch_gbe_rx_ring *rx_ring)
1001{
1002 struct pch_gbe_hw *hw = &adapter->hw;
1003 struct pch_gbe_buffer *buffer_info;
1004 unsigned long size;
1005 unsigned int i;
1006
1007
1008 for (i = 0; i < rx_ring->count; i++) {
1009 buffer_info = &rx_ring->buffer_info[i];
1010 pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
1011 }
1012 pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
1013 size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1014 memset(rx_ring->buffer_info, 0, size);
1015
1016
1017 memset(rx_ring->desc, 0, rx_ring->size);
1018 rx_ring->next_to_clean = 0;
1019 rx_ring->next_to_use = 0;
1020 iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
1021 iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
1022}
1023
static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
				    u16 duplex)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	unsigned long rgmii = 0;

	/* Set the RGMII control. */
#ifdef PCH_GBE_MAC_IFOP_RGMII
	switch (speed) {
	case SPEED_10:
		rgmii = (PCH_GBE_RGMII_RATE_2_5M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	case SPEED_100:
		rgmii = (PCH_GBE_RGMII_RATE_25M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	case SPEED_1000:
		rgmii = (PCH_GBE_RGMII_RATE_125M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	}
	iowrite32(rgmii, &hw->reg->RGMII_CTRL);
#else
	rgmii = 0;
	iowrite32(rgmii, &hw->reg->RGMII_CTRL);
#endif
}
static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
			      u16 duplex)
{
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_hw *hw = &adapter->hw;
	unsigned long mode = 0;

	/* Set the communication mode */
	switch (speed) {
	case SPEED_10:
		mode = PCH_GBE_MODE_MII_ETHER;
		netdev->tx_queue_len = 10;
		break;
	case SPEED_100:
		mode = PCH_GBE_MODE_MII_ETHER;
		netdev->tx_queue_len = 100;
		break;
	case SPEED_1000:
		mode = PCH_GBE_MODE_GMII_ETHER;
		break;
	}
	if (duplex == DUPLEX_FULL)
		mode |= PCH_GBE_MODE_FULL_DUPLEX;
	else
		mode |= PCH_GBE_MODE_HALF_DUPLEX;
	iowrite32(mode, &hw->reg->MODE);
}

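/**
 * pch_gbe_watchdog - Watchdog process
 * @data:  Board private structure
 */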
static void pch_gbe_watchdog(unsigned long data)
{
	struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_hw *hw = &adapter->hw;

	pr_debug("right now = %ld\n", jiffies);

	pch_gbe_update_stats(adapter);
	if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
		struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
		netdev->tx_queue_len = adapter->tx_queue_len;
		/* mii library handles link maintenance tasks */
		if (mii_ethtool_gset(&adapter->mii, &cmd)) {
			pr_err("ethtool get setting Error\n");
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies +
						PCH_GBE_WATCHDOG_PERIOD));
			return;
		}
		hw->mac.link_speed = ethtool_cmd_speed(&cmd);
		hw->mac.link_duplex = cmd.duplex;
		/* Set the RGMII control. */
		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
				       hw->mac.link_duplex);
		/* Set the communication mode */
		pch_gbe_set_mode(adapter, hw->mac.link_speed,
				 hw->mac.link_duplex);
		netdev_dbg(netdev,
			   "Link is Up %d Mbps %s-Duplex\n",
			   hw->mac.link_speed,
			   cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
		netif_carrier_on(netdev);
		netif_wake_queue(netdev);
	} else if ((!mii_link_ok(&adapter->mii)) &&
		   (netif_carrier_ok(netdev))) {
		netdev_dbg(netdev, "NIC Link is Down\n");
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_HALF;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}
	mod_timer(&adapter->watchdog_timer,
		  round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
}

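/**
 * pch_gbe_tx_queue - Carry out queuing of the transmission data
 * @adapter:  Board private structure
 * @tx_ring:  Tx descriptor ring structure
 * @skb:      Socket buffer structure
 */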
static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
			      struct pch_gbe_tx_ring *tx_ring,
			      struct sk_buff *skb)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_tx_desc *tx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *tmp_skb;
	unsigned int frame_ctrl;
	unsigned int ring_num;

	/*-- Set frame control --*/
	frame_ctrl = 0;
	if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
		frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
	if (skb->ip_summed == CHECKSUM_NONE)
		frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;

	/* Performs checksum processing in software, because the hardware
	 * accelerator does not support a checksum when the transmitted
	 * data size is less than 64 bytes.
	 */
	if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
		frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
			      PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			unsigned int offset;
			offset = skb_transport_offset(skb);
			if (iph->protocol == IPPROTO_TCP) {
				skb->csum = 0;
				tcp_hdr(skb)->check = 0;
				skb->csum = skb_checksum(skb, offset,
							 skb->len - offset, 0);
				tcp_hdr(skb)->check =
					csum_tcpudp_magic(iph->saddr,
							  iph->daddr,
							  skb->len - offset,
							  IPPROTO_TCP,
							  skb->csum);
			} else if (iph->protocol == IPPROTO_UDP) {
				skb->csum = 0;
				udp_hdr(skb)->check = 0;
				skb->csum =
					skb_checksum(skb, offset,
						     skb->len - offset, 0);
				udp_hdr(skb)->check =
					csum_tcpudp_magic(iph->saddr,
							  iph->daddr,
							  skb->len - offset,
							  IPPROTO_UDP,
							  skb->csum);
			}
		}
	}

	ring_num = tx_ring->next_to_use;
	if (unlikely((ring_num + 1) == tx_ring->count))
		tx_ring->next_to_use = 0;
	else
		tx_ring->next_to_use = ring_num + 1;

	buffer_info = &tx_ring->buffer_info[ring_num];
	tmp_skb = buffer_info->skb;

	/* [Header:14][payload] ---> [Header:14][padding:2][payload] */
	memcpy(tmp_skb->data, skb->data, ETH_HLEN);
	tmp_skb->data[ETH_HLEN] = 0x00;
	tmp_skb->data[ETH_HLEN + 1] = 0x00;
	tmp_skb->len = skb->len;
	memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
	       (skb->len - ETH_HLEN));
	/*-- Set Buffer information --*/
	buffer_info->length = tmp_skb->len;
	buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
					  buffer_info->length,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
		pr_err("TX DMA map failed\n");
		buffer_info->dma = 0;
		buffer_info->time_stamp = 0;
		tx_ring->next_to_use = ring_num;
		return;
	}
	buffer_info->mapped = true;
	buffer_info->time_stamp = jiffies;

	/*-- Set Tx descriptor --*/
	tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
	tx_desc->buffer_addr = (buffer_info->dma);
	tx_desc->length = (tmp_skb->len);
	tx_desc->tx_words_eob = ((tmp_skb->len + 3));
	tx_desc->tx_frame_ctrl = (frame_ctrl);
	tx_desc->gbec_status = (DSC_INIT16);

	if (unlikely(++ring_num == tx_ring->count))
		ring_num = 0;

	/* Update software pointer of TX descriptor */
	iowrite32(tx_ring->dma +
		  (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
		  &hw->reg->TX_DSC_SW_P);

	pch_tx_timestamp(adapter, skb);

	dev_kfree_skb_any(skb);
}

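/**
 * pch_gbe_update_stats - Update the board statistics counters
 * @adapter:  Board private structure
 */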
void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_hw_stats *stats = &adapter->stats;
	unsigned long flags;

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
		return;

	spin_lock_irqsave(&adapter->stats_lock, flags);

	/* Update device status "adapter->stats" */
	stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
	stats->tx_errors = stats->tx_length_errors +
	    stats->tx_aborted_errors +
	    stats->tx_carrier_errors + stats->tx_timeout_count;

	/* Update network device status "netdev->stats" */
	netdev->stats.rx_packets = stats->rx_packets;
	netdev->stats.rx_bytes = stats->rx_bytes;
	netdev->stats.rx_dropped = stats->rx_dropped;
	netdev->stats.tx_packets = stats->tx_packets;
	netdev->stats.tx_bytes = stats->tx_bytes;
	netdev->stats.tx_dropped = stats->tx_dropped;
	/* Fill out the OS statistics structure */
	netdev->stats.multicast = stats->multicast;
	netdev->stats.collisions = stats->collisions;
	/* Rx Errors */
	netdev->stats.rx_errors = stats->rx_errors;
	netdev->stats.rx_crc_errors = stats->rx_crc_errors;
	netdev->stats.rx_frame_errors = stats->rx_frame_errors;
	/* Tx Errors */
	netdev->stats.tx_errors = stats->tx_errors;
	netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
	netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}

static void pch_gbe_disable_dma_rx(struct pch_gbe_hw *hw)
{
	u32 rxdma;

	/* Disable Receive DMA */
	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma &= ~PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);
}

static void pch_gbe_enable_dma_rx(struct pch_gbe_hw *hw)
{
	u32 rxdma;

	/* Enable Receive DMA */
	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma |= PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);
}

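/**
 * pch_gbe_intr - Interrupt Handler
 * @irq:   Interrupt number
 * @data:  Pointer to the device structure
 * Returns:
 *	IRQ_HANDLED:	Our interrupt
 *	IRQ_NONE:	Not our interrupt
 */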
static irqreturn_t pch_gbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 int_st;
	u32 int_en;

	/* Check request status */
	int_st = ioread32(&hw->reg->INT_ST);
	int_st = int_st & ioread32(&hw->reg->INT_EN);
	/* When request status is no interruption factor */
	if (unlikely(!int_st))
		return IRQ_NONE;	/* Not our interrupt. End processing. */
	pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
	if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
		adapter->stats.intr_rx_frame_err_count++;
	if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
		if (!adapter->rx_stop_flag) {
			adapter->stats.intr_rx_fifo_err_count++;
			pr_debug("Rx fifo over run\n");
			adapter->rx_stop_flag = true;
			int_en = ioread32(&hw->reg->INT_EN);
			iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
				  &hw->reg->INT_EN);
			pch_gbe_disable_dma_rx(&adapter->hw);
			int_st |= ioread32(&hw->reg->INT_ST);
			int_st = int_st & ioread32(&hw->reg->INT_EN);
		}
	if (int_st & PCH_GBE_INT_RX_DMA_ERR)
		adapter->stats.intr_rx_dma_err_count++;
	if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
		adapter->stats.intr_tx_fifo_err_count++;
	if (int_st & PCH_GBE_INT_TX_DMA_ERR)
		adapter->stats.intr_tx_dma_err_count++;
	if (int_st & PCH_GBE_INT_TCPIP_ERR)
		adapter->stats.intr_tcpip_err_count++;
	/* When Rx descriptor is empty */
	if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
		adapter->stats.intr_rx_dsc_empty_count++;
		pr_debug("Rx descriptor is empty\n");
		int_en = ioread32(&hw->reg->INT_EN);
		iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
		if (hw->mac.tx_fc_enable) {
			/* Set Pause packet */
			pch_gbe_mac_set_pause_packet(hw);
		}
	}

	/* When request status is Receive interruption */
	if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
	    (adapter->rx_stop_flag)) {
		if (likely(napi_schedule_prep(&adapter->napi))) {
			/* Enable only Rx Descriptor empty */
			atomic_inc(&adapter->irq_sem);
			int_en = ioread32(&hw->reg->INT_EN);
			int_en &=
			    ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
			iowrite32(int_en, &hw->reg->INT_EN);
			/* Start polling for NAPI */
			__napi_schedule(&adapter->napi);
		}
	}
	pr_debug("return = 0x%08x  INT_EN reg = 0x%08x\n",
		 IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
	return IRQ_HANDLED;
}

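/**
 * pch_gbe_alloc_rx_buffers - Replace used receive buffers
 * @adapter:       Board private structure
 * @rx_ring:       Rx descriptor ring
 * @cleaned_count: Cleaned count
 */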
1396static void
1397pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
1398 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1399{
1400 struct net_device *netdev = adapter->netdev;
1401 struct pci_dev *pdev = adapter->pdev;
1402 struct pch_gbe_hw *hw = &adapter->hw;
1403 struct pch_gbe_rx_desc *rx_desc;
1404 struct pch_gbe_buffer *buffer_info;
1405 struct sk_buff *skb;
1406 unsigned int i;
1407 unsigned int bufsz;
1408
1409 bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
1410 i = rx_ring->next_to_use;
1411
1412 while ((cleaned_count--)) {
1413 buffer_info = &rx_ring->buffer_info[i];
1414 skb = netdev_alloc_skb(netdev, bufsz);
1415 if (unlikely(!skb)) {
1416
1417 adapter->stats.rx_alloc_buff_failed++;
1418 break;
1419 }
1420
1421 skb_reserve(skb, NET_IP_ALIGN);
1422 buffer_info->skb = skb;
1423
1424 buffer_info->dma = dma_map_single(&pdev->dev,
1425 buffer_info->rx_buffer,
1426 buffer_info->length,
1427 DMA_FROM_DEVICE);
1428 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1429 dev_kfree_skb(skb);
1430 buffer_info->skb = NULL;
1431 buffer_info->dma = 0;
1432 adapter->stats.rx_alloc_buff_failed++;
1433 break;
1434 }
1435 buffer_info->mapped = true;
1436 rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1437 rx_desc->buffer_addr = (buffer_info->dma);
1438 rx_desc->gbec_status = DSC_INIT16;
1439
1440 pr_debug("i = %d buffer_info->dma = 0x08%llx buffer_info->length = 0x%x\n",
1441 i, (unsigned long long)buffer_info->dma,
1442 buffer_info->length);
1443
1444 if (unlikely(++i == rx_ring->count))
1445 i = 0;
1446 }
1447 if (likely(rx_ring->next_to_use != i)) {
1448 rx_ring->next_to_use = i;
1449 if (unlikely(i-- == 0))
1450 i = (rx_ring->count - 1);
1451 iowrite32(rx_ring->dma +
1452 (int)sizeof(struct pch_gbe_rx_desc) * i,
1453 &hw->reg->RX_DSC_SW_P);
1454 }
1455 return;
1456}
1457
static int
pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
			      struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_buffer *buffer_info;
	unsigned int i;
	unsigned int bufsz;
	unsigned int size;

	bufsz = adapter->rx_buffer_len;

	size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
	rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
						   &rx_ring->rx_buff_pool_logic,
						   GFP_KERNEL | __GFP_ZERO);
	if (!rx_ring->rx_buff_pool)
		return -ENOMEM;

	rx_ring->rx_buff_pool_size = size;
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
		buffer_info->length = bufsz;
	}
	return 0;
}

/**
 * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
 * @adapter:   Board private structure
 * @tx_ring:   Tx descriptor ring
 */
static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
					struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz;
	struct pch_gbe_tx_desc *tx_desc;

	bufsz =
	    adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		skb = netdev_alloc_skb(adapter->netdev, bufsz);
		skb_reserve(skb, PCH_GBE_DMA_ALIGN);
		buffer_info->skb = skb;
		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
		tx_desc->gbec_status = (DSC_INIT16);
	}
	return;
}

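/**
 * pch_gbe_clean_tx - Reclaim resources after transmit completes
 * @adapter:   Board private structure
 * @tx_ring:   Tx descriptor ring
 * Returns:
 *	true:  Cleaned the descriptor
 *	false: Not cleaned the descriptor
 */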
1522static bool
1523pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1524 struct pch_gbe_tx_ring *tx_ring)
1525{
1526 struct pch_gbe_tx_desc *tx_desc;
1527 struct pch_gbe_buffer *buffer_info;
1528 struct sk_buff *skb;
1529 unsigned int i;
1530 unsigned int cleaned_count = 0;
1531 bool cleaned = false;
1532 int unused, thresh;
1533
1534 pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
1535
1536 i = tx_ring->next_to_clean;
1537 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1538 pr_debug("gbec_status:0x%04x dma_status:0x%04x\n",
1539 tx_desc->gbec_status, tx_desc->dma_status);
1540
1541 unused = PCH_GBE_DESC_UNUSED(tx_ring);
1542 thresh = tx_ring->count - PCH_GBE_TX_WEIGHT;
1543 if ((tx_desc->gbec_status == DSC_INIT16) && (unused < thresh))
1544 {
1545 int j, k;
1546 if (unused < 8) {
1547 pr_debug("clean_tx: transmit queue warning (%x,%x) unused=%d\n",
1548 tx_ring->next_to_clean,tx_ring->next_to_use,unused);
1549 }
1550
1551
1552 k = i;
1553 for (j = 0; j < PCH_GBE_TX_WEIGHT; j++)
1554 {
1555 tx_desc = PCH_GBE_TX_DESC(*tx_ring, k);
1556 if (tx_desc->gbec_status != DSC_INIT16) break;
1557 if (++k >= tx_ring->count) k = 0;
1558 }
1559 if (j < PCH_GBE_TX_WEIGHT) {
1560 pr_debug("clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n",
1561 unused,j, i,k, tx_ring->next_to_use, tx_desc->gbec_status);
1562 i = k;
1563 }
1564 }
1565
1566 while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
1567 pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
1568 buffer_info = &tx_ring->buffer_info[i];
1569 skb = buffer_info->skb;
1570 cleaned = true;
1571
1572 if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
1573 adapter->stats.tx_aborted_errors++;
1574 pr_err("Transfer Abort Error\n");
1575 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
1576 ) {
1577 adapter->stats.tx_carrier_errors++;
1578 pr_err("Transfer Carrier Sense Error\n");
1579 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
1580 ) {
1581 adapter->stats.tx_aborted_errors++;
1582 pr_err("Transfer Collision Abort Error\n");
1583 } else if ((tx_desc->gbec_status &
1584 (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
1585 PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
1586 adapter->stats.collisions++;
1587 adapter->stats.tx_packets++;
1588 adapter->stats.tx_bytes += skb->len;
1589 pr_debug("Transfer Collision\n");
1590 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
1591 ) {
1592 adapter->stats.tx_packets++;
1593 adapter->stats.tx_bytes += skb->len;
1594 }
1595 if (buffer_info->mapped) {
1596 pr_debug("unmap buffer_info->dma : %d\n", i);
1597 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1598 buffer_info->length, DMA_TO_DEVICE);
1599 buffer_info->mapped = false;
1600 }
1601 if (buffer_info->skb) {
1602 pr_debug("trim buffer_info->skb : %d\n", i);
1603 skb_trim(buffer_info->skb, 0);
1604 }
1605 tx_desc->gbec_status = DSC_INIT16;
1606 if (unlikely(++i == tx_ring->count))
1607 i = 0;
1608 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1609
1610
1611 if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
1612 cleaned = false;
1613 break;
1614 }
1615 }
1616 pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
1617 cleaned_count);
1618 if (cleaned_count > 0) {
1619
1620 spin_lock(&tx_ring->tx_lock);
1621 if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev))))
1622 {
1623 netif_wake_queue(adapter->netdev);
1624 adapter->stats.tx_restart_count++;
1625 pr_debug("Tx wake queue\n");
1626 }
1627
1628 tx_ring->next_to_clean = i;
1629
1630 pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
1631 spin_unlock(&tx_ring->tx_lock);
1632 }
1633 return cleaned;
1634}
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
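/**
 * pch_gbe_clean_rx - Send received data up the network stack
 * @adapter:     Board private structure
 * @rx_ring:     Rx descriptor ring
 * @work_done:   Completed count
 * @work_to_do:  Request count
 * Returns:
 *	true:  Cleaned the descriptor
 *	false: Not cleaned the descriptor
 */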
1646static bool
1647pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1648 struct pch_gbe_rx_ring *rx_ring,
1649 int *work_done, int work_to_do)
1650{
1651 struct net_device *netdev = adapter->netdev;
1652 struct pci_dev *pdev = adapter->pdev;
1653 struct pch_gbe_buffer *buffer_info;
1654 struct pch_gbe_rx_desc *rx_desc;
1655 u32 length;
1656 unsigned int i;
1657 unsigned int cleaned_count = 0;
1658 bool cleaned = false;
1659 struct sk_buff *skb;
1660 u8 dma_status;
1661 u16 gbec_status;
1662 u32 tcp_ip_status;
1663
1664 i = rx_ring->next_to_clean;
1665
1666 while (*work_done < work_to_do) {
1667
1668 rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1669 if (rx_desc->gbec_status == DSC_INIT16)
1670 break;
1671 cleaned = true;
1672 cleaned_count++;
1673
1674 dma_status = rx_desc->dma_status;
1675 gbec_status = rx_desc->gbec_status;
1676 tcp_ip_status = rx_desc->tcp_ip_status;
1677 rx_desc->gbec_status = DSC_INIT16;
1678 buffer_info = &rx_ring->buffer_info[i];
1679 skb = buffer_info->skb;
1680 buffer_info->skb = NULL;
1681
1682
1683 dma_unmap_single(&pdev->dev, buffer_info->dma,
1684 buffer_info->length, DMA_FROM_DEVICE);
1685 buffer_info->mapped = false;
1686
1687 pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x "
1688 "TCP:0x%08x] BufInf = 0x%p\n",
1689 i, dma_status, gbec_status, tcp_ip_status,
1690 buffer_info);
1691
1692 if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
1693 adapter->stats.rx_frame_errors++;
1694 pr_err("Receive Not Octal Error\n");
1695 } else if (unlikely(gbec_status &
1696 PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
1697 adapter->stats.rx_frame_errors++;
1698 pr_err("Receive Nibble Error\n");
1699 } else if (unlikely(gbec_status &
1700 PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
1701 adapter->stats.rx_crc_errors++;
1702 pr_err("Receive CRC Error\n");
1703 } else {
1704
1705
1706 length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
1707 if (rx_desc->rx_words_eob & 0x02)
1708 length = length - 4;
1709
1710
1711
1712
1713 memcpy(skb->data, buffer_info->rx_buffer, length);
1714
1715
1716 adapter->stats.rx_bytes += length;
1717 adapter->stats.rx_packets++;
1718 if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
1719 adapter->stats.multicast++;
1720
1721 skb_put(skb, length);
1722
1723 pch_rx_timestamp(adapter, skb);
1724
1725 skb->protocol = eth_type_trans(skb, netdev);
1726 if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
1727 skb->ip_summed = CHECKSUM_UNNECESSARY;
1728 else
1729 skb->ip_summed = CHECKSUM_NONE;
1730
1731 napi_gro_receive(&adapter->napi, skb);
1732 (*work_done)++;
1733 pr_debug("Receive skb->ip_summed: %d length: %d\n",
1734 skb->ip_summed, length);
1735 }
1736
1737 if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
1738 pch_gbe_alloc_rx_buffers(adapter, rx_ring,
1739 cleaned_count);
1740 cleaned_count = 0;
1741 }
1742 if (++i == rx_ring->count)
1743 i = 0;
1744 }
1745 rx_ring->next_to_clean = i;
1746 if (cleaned_count)
1747 pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
1748 return cleaned;
1749}
1750
1751
1752
1753
1754
1755
1756
1757
1758
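/**
 * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
 * @adapter:  Board private structure
 * @tx_ring:  Tx descriptor ring (for a specific queue) to setup
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */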
int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
				struct pch_gbe_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_tx_desc *tx_desc;
	int size;
	int desNo;

	size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info)
		return -ENOMEM;

	tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma,
					   GFP_KERNEL | __GFP_ZERO);
	if (!tx_ring->desc) {
		vfree(tx_ring->buffer_info);
		return -ENOMEM;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	spin_lock_init(&tx_ring->tx_lock);

	for (desNo = 0; desNo < tx_ring->count; desNo++) {
		tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
		tx_desc->gbec_status = DSC_INIT16;
	}
	pr_debug("tx_ring->desc = 0x%p  tx_ring->dma = 0x%08llx\n"
		 "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
		 tx_ring->desc, (unsigned long long)tx_ring->dma,
		 tx_ring->next_to_clean, tx_ring->next_to_use);
	return 0;
}

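/**
 * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
 * @adapter:  Board private structure
 * @rx_ring:  Rx descriptor ring (for a specific queue) to setup
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */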
int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
				struct pch_gbe_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_rx_desc *rx_desc;
	int size;
	int desNo;

	size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		return -ENOMEM;

	rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL | __GFP_ZERO);
	if (!rx_ring->desc) {
		vfree(rx_ring->buffer_info);
		return -ENOMEM;
	}
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	for (desNo = 0; desNo < rx_ring->count; desNo++) {
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
		rx_desc->gbec_status = DSC_INIT16;
	}
	pr_debug("rx_ring->desc = 0x%p  rx_ring->dma = 0x%08llx "
		 "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
		 rx_ring->desc, (unsigned long long)rx_ring->dma,
		 rx_ring->next_to_clean, rx_ring->next_to_use);
	return 0;
}

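/**
 * pch_gbe_free_tx_resources - Free Tx Resources
 * @adapter:  Board private structure
 * @tx_ring:  Tx descriptor ring for a specific queue
 */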
void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
				struct pch_gbe_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	pch_gbe_clean_tx_ring(adapter, tx_ring);
	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;
	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
	tx_ring->desc = NULL;
}

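/**
 * pch_gbe_free_rx_resources - Free Rx Resources
 * @adapter:  Board private structure
 * @rx_ring:  Ring to clean the resources from
 */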
void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
				struct pch_gbe_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	pch_gbe_clean_rx_ring(adapter, rx_ring);
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
	rx_ring->desc = NULL;
}

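/**
 * pch_gbe_request_irq - Allocate an interrupt line
 * @adapter:  Board private structure
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */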
static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;
	int flags;

	flags = IRQF_SHARED;
	adapter->have_msi = false;
	err = pci_enable_msi(adapter->pdev);
	pr_debug("call pci_enable_msi\n");
	if (err) {
		pr_debug("call pci_enable_msi - Error: %d\n", err);
	} else {
		flags = 0;
		adapter->have_msi = true;
	}
	err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
			  flags, netdev->name, netdev);
	if (err)
		pr_err("Unable to allocate interrupt Error: %d\n", err);
	pr_debug("adapter->have_msi : %d  flags : 0x%04x  return : 0x%04x\n",
		 adapter->have_msi, flags, err);
	return err;
}

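/**
 * pch_gbe_up - Up GbE network device
 * @adapter:  Board private structure
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */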
int pch_gbe_up(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
	int err = -EINVAL;

	/* Ensure we have a valid MAC */
	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
		pr_err("Error: Invalid MAC address\n");
		goto out;
	}

	/* hardware has been reset, we need to reload some things */
	pch_gbe_set_multi(netdev);

	pch_gbe_setup_tctl(adapter);
	pch_gbe_configure_tx(adapter);
	pch_gbe_setup_rctl(adapter);
	pch_gbe_configure_rx(adapter);

	err = pch_gbe_request_irq(adapter);
	if (err) {
		pr_err("Error: can't bring device up - irq request failed\n");
		goto out;
	}
	err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
	if (err) {
		pr_err("Error: can't bring device up - alloc rx buffers pool failed\n");
		goto freeirq;
	}
	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
	adapter->tx_queue_len = netdev->tx_queue_len;
	pch_gbe_enable_dma_rx(&adapter->hw);
	pch_gbe_enable_mac_rx(&adapter->hw);

	mod_timer(&adapter->watchdog_timer, jiffies);

	napi_enable(&adapter->napi);
	pch_gbe_irq_enable(adapter);
	netif_start_queue(adapter->netdev);

	return 0;

freeirq:
	pch_gbe_free_irq(adapter);
out:
	return err;
}

/**
 * pch_gbe_down - Down GbE network device
 * @adapter:  Board private structure
 */
void pch_gbe_down(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	napi_disable(&adapter->napi);
	atomic_set(&adapter->irq_sem, 0);

	pch_gbe_irq_disable(adapter);
	pch_gbe_free_irq(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
		pch_gbe_reset(adapter);
	pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
	pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);

	pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
			    rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
	rx_ring->rx_buff_pool_logic = 0;
	rx_ring->rx_buff_pool_size = 0;
	rx_ring->rx_buff_pool = NULL;
}

/**
 * pch_gbe_sw_init - Initialize general software structures
 * @adapter:  Board private structure to initialize
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */
static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
	hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	/* Initialize the hardware-specific values */
	if (pch_gbe_hal_setup_init_funcs(hw)) {
		pr_err("Hardware Initialization Failure\n");
		return -EIO;
	}
	if (pch_gbe_alloc_queues(adapter)) {
		pr_err("Unable to allocate memory for queues\n");
		return -ENOMEM;
	}
	spin_lock_init(&adapter->hw.miim_lock);
	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->ethtool_lock);
	atomic_set(&adapter->irq_sem, 0);
	pch_gbe_irq_disable(adapter);

	pch_gbe_init_stats(adapter);

	pr_debug("rx_buffer_len : %d  mac.min_frame_size : %d  mac.max_frame_size : %d\n",
		 (u32) adapter->rx_buffer_len,
		 hw->mac.min_frame_size, hw->mac.max_frame_size);
	return 0;
}

/**
 * pch_gbe_open - Called when a network interface is made active
 * @netdev:	Network interface device structure
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */
static int pch_gbe_open(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	int err;

	/* allocate transmit descriptors */
	err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;
	/* allocate receive descriptors */
	err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;
	pch_gbe_hal_power_up_phy(hw);
	err = pch_gbe_up(adapter);
	if (err)
		goto err_up;
	pr_debug("Success End\n");
	return 0;

err_up:
	if (!adapter->wake_up_evt)
		pch_gbe_hal_power_down_phy(hw);
	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
err_setup_rx:
	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
err_setup_tx:
	pch_gbe_reset(adapter);
	pr_err("Error End\n");
	return err;
}

/**
 * pch_gbe_stop - Disables a network interface
 * @netdev:  Network interface device structure
 * Returns:
 *	0: Successfully
 */
static int pch_gbe_stop(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	pch_gbe_down(adapter);
	if (!adapter->wake_up_evt)
		pch_gbe_hal_power_down_phy(hw);
	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
	return 0;
}

/**
 * pch_gbe_xmit_frame - Packet transmitting start
 * @skb:     Socket buffer structure
 * @netdev:  Network interface device structure
 * Returns:
 *	- NETDEV_TX_OK:   Normal end
 *	- NETDEV_TX_BUSY: Error end
 */
static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
	unsigned long flags;

	if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
		/* Collision - tell upper layer to requeue */
		return NETDEV_TX_LOCKED;
	}
	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
		pr_debug("Return : BUSY  next_to use : 0x%08x  next_to clean : 0x%08x\n",
			 tx_ring->next_to_use, tx_ring->next_to_clean);
		return NETDEV_TX_BUSY;
	}

	/* CRC,ITAG no support */
	pch_gbe_tx_queue(adapter, tx_ring, skb);
	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
	return NETDEV_TX_OK;
}

/**
 * pch_gbe_get_stats - Get System Network Statistics
 * @netdev:  Network interface device structure
 * Returns:  The current stats
 */
static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
{
	/* only return the current stats */
	return &netdev->stats;
}

/**
 * pch_gbe_set_multi - Multicast and Promiscuous mode set
 * @netdev:   Network interface device structure
 */
static void pch_gbe_set_multi(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	u32 rctl;
	int i;
	int mc_count;

	pr_debug("netdev->flags : 0x%08x\n", netdev->flags);

	/* Check for Promiscuous and All Multicast modes */
	rctl = ioread32(&hw->reg->RX_MODE);
	mc_count = netdev_mc_count(netdev);
	if ((netdev->flags & IFF_PROMISC)) {
		rctl &= ~PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else if ((netdev->flags & IFF_ALLMULTI)) {
		/* all the multicasting receive permissions */
		rctl |= PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else {
		if (mc_count >= PCH_GBE_MAR_ENTRIES) {
			/* all the multicasting receive permissions */
			rctl |= PCH_GBE_ADD_FIL_EN;
			rctl &= ~PCH_GBE_MLT_FIL_EN;
		} else {
			rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
		}
	}
	iowrite32(rctl, &hw->reg->RX_MODE);

	if (mc_count >= PCH_GBE_MAR_ENTRIES)
		return;
	mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == mc_count)
			break;
		memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
	}
	pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
					PCH_GBE_MAR_ENTRIES);
	kfree(mta_list);

	pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x  netdev->mc_count : 0x%08x\n",
		 ioread32(&hw->reg->RX_MODE), mc_count);
}

/**
 * pch_gbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: Network interface device structure
 * @addr:   Pointer to an address structure
 * Returns:
 *	0:		Successfully
 *	-EADDRNOTAVAIL:	Failed
 */
static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *skaddr = addr;
	int ret_val;

	if (!is_valid_ether_addr(skaddr->sa_data)) {
		ret_val = -EADDRNOTAVAIL;
	} else {
		memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
		memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
		pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
		ret_val = 0;
	}
	pr_debug("ret_val : 0x%08x\n", ret_val);
	pr_debug("dev_addr : %pM\n", netdev->dev_addr);
	pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
	pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
		 ioread32(&adapter->hw.reg->mac_adr[0].high),
		 ioread32(&adapter->hw.reg->mac_adr[0].low));
	return ret_val;
}

/**
 * pch_gbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev:   Network interface device structure
 * @new_mtu:  New value for maximum frame size
 * Returns:
 *	0:		Successfully
 *	-EINVAL:	Failed
 */
static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	int max_frame;
	unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
	int err;

	max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
	    (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
		pr_err("Invalid MTU setting\n");
		return -EINVAL;
	}
	if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
	else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
	else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
	else
		adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;

	if (netif_running(netdev)) {
		pch_gbe_down(adapter);
		err = pch_gbe_up(adapter);
		if (err) {
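			/* roll back and bring the interface back up */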
			adapter->rx_buffer_len = old_rx_buffer_len;
			pch_gbe_up(adapter);
			return err;
		} else {
			netdev->mtu = new_mtu;
			adapter->hw.mac.max_frame_size = max_frame;
		}
	} else {
		pch_gbe_reset(adapter);
		netdev->mtu = new_mtu;
		adapter->hw.mac.max_frame_size = max_frame;
	}

	pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
		 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
		 adapter->hw.mac.max_frame_size);
	return 0;
}
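
/**
 * pch_gbe_set_features - Reset the device after the offload features change
 * @netdev:   network interface device structure
 * @features: the new feature set
 * Returns: 0 (always succeeds)
 */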
static int pch_gbe_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

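	/* only a change in the Rx checksum offload requires a reset */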
	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	if (netif_running(netdev))
		pch_gbe_reinit_locked(adapter);
	else
		pch_gbe_reset(adapter);

	return 0;
}
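
/**
 * pch_gbe_ioctl - Handle hardware timestamping and MII ioctls
 * @netdev: network interface device structure
 * @ifr:    pointer to the ifreq structure
 * @cmd:    ioctl command
 * Returns: 0 on success, a negative errno on failure
 */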
static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	pr_debug("cmd : 0x%04x\n", cmd);

	if (cmd == SIOCSHWTSTAMP)
		return hwtstamp_ioctl(netdev, ifr, cmd);

	return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
}
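
/**
 * pch_gbe_tx_timeout - Respond to a Tx hang
 * @netdev: network interface device structure
 */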
static void pch_gbe_tx_timeout(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

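	/* do the reset outside of the interrupt context */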
	adapter->stats.tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}
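
/**
 * pch_gbe_napi_poll - NAPI receive and transmit polling callback
 * @napi:   pointer to the adapter's polling structure
 * @budget: maximum number of Rx packets to process in one poll
 * Returns: the amount of work done; equals @budget while more work remains
 */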
static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
{
	struct pch_gbe_adapter *adapter =
	    container_of(napi, struct pch_gbe_adapter, napi);
	int work_done = 0;
	bool poll_end_flag = false;
	bool cleaned = false;

	pr_debug("budget : %d\n", budget);

	pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
	cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);

	if (cleaned)
		work_done = budget;

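	/* if not all of the budget was consumed, exit polling mode */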
	if (work_done < budget)
		poll_end_flag = true;

	if (poll_end_flag) {
		napi_complete(napi);
		pch_gbe_irq_enable(adapter);
	}

	if (adapter->rx_stop_flag) {
		adapter->rx_stop_flag = false;
		pch_gbe_enable_dma_rx(&adapter->hw);
	}

	pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
		 poll_end_flag, work_done, budget);

	return work_done;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
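
/**
 * pch_gbe_netpoll - Polling "interrupt" handler
 * @netdev: network interface device structure
 *
 * Used by things like netconsole to send skbs without having to
 * re-enable interrupts.
 */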
static void pch_gbe_netpoll(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	pch_gbe_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif

static const struct net_device_ops pch_gbe_netdev_ops = {
	.ndo_open = pch_gbe_open,
	.ndo_stop = pch_gbe_stop,
	.ndo_start_xmit = pch_gbe_xmit_frame,
	.ndo_get_stats = pch_gbe_get_stats,
	.ndo_set_mac_address = pch_gbe_set_mac,
	.ndo_tx_timeout = pch_gbe_tx_timeout,
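	/* _rh74: assumed RHEL 7.4 kABI name for the MTU-change hook */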
	.ndo_change_mtu_rh74 = pch_gbe_change_mtu,
	.ndo_set_features = pch_gbe_set_features,
	.ndo_do_ioctl = pch_gbe_ioctl,
	.ndo_set_rx_mode = pch_gbe_set_multi,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = pch_gbe_netpoll,
#endif
};
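
/**
 * pch_gbe_io_error_detected - called when a PCI error is detected
 * @pdev:  PCI device information struct
 * @state: the current PCI connection state
 * Returns: PCI_ERS_RESULT_NEED_RESET to request a slot reset
 */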
static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);
	if (netif_running(netdev))
		pch_gbe_down(adapter);
	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
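
/**
 * pch_gbe_io_slot_reset - called after the PCI slot has been reset
 * @pdev: PCI device information struct
 * Returns: PCI_ERS_RESULT_RECOVERED on success,
 *	PCI_ERS_RESULT_DISCONNECT if the device cannot be re-enabled
 */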
static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);
	pch_gbe_hal_power_up_phy(hw);
	pch_gbe_reset(adapter);

	pch_gbe_mac_set_wol_event(hw, 0);

	return PCI_ERS_RESULT_RECOVERED;
}
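
/**
 * pch_gbe_io_resume - called when normal operation can resume
 * @pdev: PCI device information struct
 */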
static void pch_gbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (pch_gbe_up(adapter)) {
			pr_debug("can't bring device back up after reset\n");
			return;
		}
	}
	netif_device_attach(netdev);
}
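
/*
 * Common suspend path shared by system suspend and shutdown. When any
 * wake-up events are configured, the receiver is left programmed so the
 * hardware can recognize the wake-on-LAN pattern; otherwise the PHY is
 * powered down before the PCI device is disabled.
 */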
static int __pch_gbe_suspend(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 wufc = adapter->wake_up_evt;
	int retval = 0;

	netif_device_detach(netdev);
	if (netif_running(netdev))
		pch_gbe_down(adapter);
	if (wufc) {
		pch_gbe_set_multi(netdev);
		pch_gbe_setup_rctl(adapter);
		pch_gbe_configure_rx(adapter);
		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
				       hw->mac.link_duplex);
		pch_gbe_set_mode(adapter, hw->mac.link_speed,
				 hw->mac.link_duplex);
		pch_gbe_mac_set_wol_event(hw, wufc);
		pci_disable_device(pdev);
	} else {
		pch_gbe_hal_power_down_phy(hw);
		pch_gbe_mac_set_wol_event(hw, wufc);
		pci_disable_device(pdev);
	}
	return retval;
}

#ifdef CONFIG_PM
static int pch_gbe_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	return __pch_gbe_suspend(pdev);
}

static int pch_gbe_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);
	pch_gbe_hal_power_up_phy(hw);
	pch_gbe_reset(adapter);

	pch_gbe_mac_set_wol_event(hw, 0);

	if (netif_running(netdev))
		pch_gbe_up(adapter);
	netif_device_attach(netdev);

	return 0;
}
#endif
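
/**
 * pch_gbe_shutdown - driver shutdown callback
 * @pdev: PCI device information struct
 *
 * Runs the common suspend path; when the system is powering off, it also
 * arms wake-from-D3 so a configured wake-on-LAN event can wake the machine.
 */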
static void pch_gbe_shutdown(struct pci_dev *pdev)
{
	__pch_gbe_suspend(pdev);
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
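
/**
 * pch_gbe_remove - device removal routine
 * @pdev: PCI device information struct
 */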
static void pch_gbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	cancel_work_sync(&adapter->reset_task);
	unregister_netdev(netdev);

	pch_gbe_hal_phy_hw_reset(&adapter->hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.reg);
	pci_release_regions(pdev);
	free_netdev(netdev);
	pci_disable_device(pdev);
}
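
/**
 * pch_gbe_probe - device initialization routine
 * @pdev:   PCI device information struct
 * @pci_id: matched entry of the pch_gbe_pcidev_id table
 * Returns: 0 on success, a negative errno on failure
 */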
static int pch_gbe_probe(struct pci_dev *pdev,
			 const struct pci_device_id *pci_id)
{
	struct net_device *netdev;
	struct pch_gbe_adapter *adapter;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

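	/*
	 * Prefer 64-bit DMA. If either 64-bit mask is rejected, fall back
	 * to 32-bit masks before giving up.
	 */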
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret) {
			ret = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
			if (ret) {
				dev_err(&pdev->dev,
					"ERR: No usable DMA configuration, aborting\n");
				goto err_disable_device;
			}
		}
	}

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret) {
		dev_err(&pdev->dev,
			"ERR: Can't reserve PCI I/O and memory resources\n");
		goto err_disable_device;
	}
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct pch_gbe_adapter));
	if (!netdev) {
		ret = -ENOMEM;
		goto err_release_pci;
	}
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
	if (!adapter->hw.reg) {
		ret = -EIO;
		dev_err(&pdev->dev, "Can't ioremap\n");
		goto err_free_netdev;
	}

	adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
						 PCI_DEVFN(12, 4));
	if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
		pr_err("Bad ptp filter\n");
		ret = -EINVAL;
		goto err_iounmap;
	}

	netdev->netdev_ops = &pch_gbe_netdev_ops;
	netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
	netif_napi_add(netdev, &adapter->napi,
		       pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
	netdev->hw_features = NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	netdev->features = netdev->hw_features;
	pch_gbe_set_ethtool_ops(netdev);

	pch_gbe_mac_load_mac_addr(&adapter->hw);
	pch_gbe_mac_reset_hw(&adapter->hw);

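	/* set up the adapter structure */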
	ret = pch_gbe_sw_init(adapter);
	if (ret)
		goto err_iounmap;

	ret = pch_gbe_init_phy(adapter);
	if (ret) {
		dev_err(&pdev->dev, "PHY initialize error\n");
		goto err_free_adapter;
	}
	pch_gbe_hal_get_bus_info(&adapter->hw);

	ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
	if (ret) {
		dev_err(&pdev->dev, "MAC address Read Error\n");
		goto err_free_adapter;
	}

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	if (!is_valid_ether_addr(netdev->dev_addr)) {
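		/*
		 * The MAC is invalid (or missing): warn, but keep setting up
		 * the device instead of aborting the probe; the interface is
		 * not usable until a valid MAC address is configured.
		 */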
		dev_err(&pdev->dev,
			"Invalid MAC address, interface disabled.\n");
	}
	setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
		    (unsigned long)adapter);

	INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);

	pch_gbe_check_options(adapter);

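	/* set the initial wake-up event mask */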
	adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
	dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);

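	/* reset the hardware with the current settings */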
	pch_gbe_reset(adapter);

	ret = register_netdev(netdev);
	if (ret)
		goto err_free_adapter;

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	dev_dbg(&pdev->dev, "PCH Network Connection\n");

	device_set_wakeup_enable(&pdev->dev, 1);
	return 0;

err_free_adapter:
	pch_gbe_hal_phy_hw_reset(&adapter->hw);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_iounmap:
	iounmap(adapter->hw.reg);
err_free_netdev:
	free_netdev(netdev);
err_release_pci:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return ret;
}

static const struct pci_device_id pch_gbe_pcidev_id[] = {
	{.vendor = PCI_VENDOR_ID_INTEL,
	 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	{.vendor = PCI_VENDOR_ID_ROHM,
	 .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	{.vendor = PCI_VENDOR_ID_ROHM,
	 .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
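	/* required last entry */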
	{0}
};

#ifdef CONFIG_PM
static const struct dev_pm_ops pch_gbe_pm_ops = {
	.suspend = pch_gbe_suspend,
	.resume = pch_gbe_resume,
	.freeze = pch_gbe_suspend,
	.thaw = pch_gbe_resume,
	.poweroff = pch_gbe_suspend,
	.restore = pch_gbe_resume,
};
#endif

static const struct pci_error_handlers pch_gbe_err_handler = {
	.error_detected = pch_gbe_io_error_detected,
	.slot_reset = pch_gbe_io_slot_reset,
	.resume = pch_gbe_io_resume
};

static struct pci_driver pch_gbe_driver = {
	.name = KBUILD_MODNAME,
	.id_table = pch_gbe_pcidev_id,
	.probe = pch_gbe_probe,
	.remove = pch_gbe_remove,
#ifdef CONFIG_PM
	.driver.pm = &pch_gbe_pm_ops,
#endif
	.shutdown = pch_gbe_shutdown,
	.err_handler = &pch_gbe_err_handler
};

static int __init pch_gbe_init_module(void)
{
	int ret;

	pr_info("EG20T PCH Gigabit Ethernet Driver - version %s\n", DRV_VERSION);
	ret = pci_register_driver(&pch_gbe_driver);
	if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
		if (copybreak == 0) {
			pr_info("copybreak disabled\n");
		} else {
			pr_info("copybreak enabled for packets <= %u bytes\n",
				copybreak);
		}
	}
	return ret;
}

static void __exit pch_gbe_exit_module(void)
{
	pci_unregister_driver(&pch_gbe_driver);
}

module_init(pch_gbe_init_module);
module_exit(pch_gbe_exit_module);

MODULE_DESCRIPTION("EG20T PCH Gigabit Ethernet Driver");
MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);

module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
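
/*
 * Example usage (assuming the module is built as pch_gbe.ko): copybreak
 * can be set at load time or, since the permission is 0644, at runtime:
 *   modprobe pch_gbe copybreak=0
 *   echo 128 > /sys/module/pch_gbe/parameters/copybreak
 */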