#include "pch_gbe.h"
#include "pch_gbe_phy.h"
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_pch.h>
#include <linux/gpio.h>

#define DRV_VERSION		"1.01"
const char pch_driver_version[] = DRV_VERSION;

#define PCH_GBE_MAR_ENTRIES		16
#define PCH_GBE_SHORT_PKT		64
#define DSC_INIT16			0xC000
#define PCH_GBE_DMA_ALIGN		0
#define PCH_GBE_DMA_PADDING		2
#define PCH_GBE_WATCHDOG_PERIOD		(5 * HZ)
#define PCH_GBE_PCI_BAR			1
#define PCH_GBE_RESERVE_MEMORY		0x200000	/* 2 MB */

#define PCI_DEVICE_ID_INTEL_IOH1_GBE	0x8802

#define PCI_DEVICE_ID_ROHM_ML7223_GBE	0x8013
#define PCI_DEVICE_ID_ROHM_ML7831_GBE	0x8802

#define PCH_GBE_TX_WEIGHT		64
#define PCH_GBE_RX_WEIGHT		64
#define PCH_GBE_RX_BUFFER_WRITE		16

/* Wake-on-LAN initial setting (magic packet) */
#define PCH_GBE_WL_INIT_SETTING		(PCH_GBE_WLC_MP)

#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
	PCH_GBE_CHIP_TYPE_INTERNAL | \
	PCH_GBE_RGMII_MODE_RGMII \
	)

/* RX buffer and frame size limits */
#define PCH_GBE_MAX_RX_BUFFER_SIZE	0x2880
#define PCH_GBE_MAX_JUMBO_FRAME_SIZE	10318
#define PCH_GBE_FRAME_SIZE_2048		2048
#define PCH_GBE_FRAME_SIZE_4096		4096
#define PCH_GBE_FRAME_SIZE_8192		8192

#define PCH_GBE_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
#define PCH_GBE_RX_DESC(R, i)		PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
#define PCH_GBE_TX_DESC(R, i)		PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
#define PCH_GBE_DESC_UNUSED(R) \
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	(R)->next_to_clean - (R)->next_to_use - 1)

/* Pause packet field values */
#define PCH_GBE_PAUSE_PKT1_VALUE	0x00C28001
#define PCH_GBE_PAUSE_PKT2_VALUE	0x00000100
#define PCH_GBE_PAUSE_PKT4_VALUE	0x01000888
#define PCH_GBE_PAUSE_PKT5_VALUE	0x0000FFFF

/* Interrupt sources the driver enables by default */
#define PCH_GBE_INT_ENABLE_MASK ( \
	PCH_GBE_INT_RX_DMA_CMPLT | \
	PCH_GBE_INT_RX_DSC_EMP | \
	PCH_GBE_INT_RX_FIFO_ERR | \
	PCH_GBE_INT_WOL_DET | \
	PCH_GBE_INT_TX_CMPLT \
	)

#define PCH_GBE_INT_DISABLE_ALL		0

/* IEEE 1588 time-sync channel control bits */
#define MASTER_MODE		(1<<0)
#define SLAVE_MODE		(0)
#define V2_MODE			(1<<31)
#define CAP_MODE0		(0)
#define CAP_MODE2		(1<<17)

/* IEEE 1588 time-sync channel event bits */
#define TX_SNAPSHOT_LOCKED	(1<<0)
#define RX_SNAPSHOT_LOCKED	(1<<1)

#define PTP_L4_MULTICAST_SA	"01:00:5e:00:01:81"
#define PTP_L2_MULTICAST_SA	"01:1b:19:00:00:00"

#define MINNOW_PHY_RESET_GPIO	13
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
			       int data);
static void pch_gbe_set_multi(struct net_device *netdev);

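/**
 * pch_ptp_match - check that an skb is the PTP frame a snapshot was taken for
 * @skb:    Received socket buffer
 * @uid_hi: High 16 bits of the source UUID latched by the hardware
 * @uid_lo: Low 32 bits of the source UUID latched by the hardware
 * @seqid:  PTP sequence id latched by the hardware
 * Returns: 1 if the skb matches the latched snapshot, otherwise 0
 */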
static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
	u8 *data = skb->data;
	unsigned int offset;
	u16 *hi, *id;
	u32 lo;

	if (ptp_classify_raw(skb) == PTP_CLASS_NONE)
		return 0;

	offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;

	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
		return 0;

	hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
	id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

	memcpy(&lo, &hi[1], sizeof(lo));

	return (uid_hi == *hi &&
		uid_lo == lo &&
		seqid == *id);
}

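/**
 * pch_rx_timestamp - attach a hardware RX timestamp to an skb
 * @adapter: Board private structure
 * @skb:     Received socket buffer
 *
 * If the latched RX snapshot matches this skb, copy the timestamp into
 * the skb's shared hwtstamp area, then re-arm the snapshot lock.
 */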
static void
pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct pci_dev *pdev;
	u64 ns;
	u32 hi, lo, val;
	u16 uid, seq;

	if (!adapter->hwts_rx_en)
		return;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	val = pch_ch_event_read(pdev);

	if (!(val & RX_SNAPSHOT_LOCKED))
		return;

	lo = pch_src_uuid_lo_read(pdev);
	hi = pch_src_uuid_hi_read(pdev);

	uid = hi & 0xffff;
	seq = (hi >> 16) & 0xffff;

	if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
		goto out;

	ns = pch_rx_snap_read(pdev);

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
	pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
}

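/**
 * pch_tx_timestamp - report a hardware TX timestamp for an skb
 * @adapter: Board private structure
 * @skb:     Transmitted socket buffer
 *
 * Polls briefly for the TX snapshot to latch, then hands the timestamp
 * to the stack and re-arms the snapshot lock.
 */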
static void
pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct pci_dev *pdev;
	struct skb_shared_info *shtx;
	u64 ns;
	u32 cnt, val;

	shtx = skb_shinfo(skb);
	if (likely(!(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)))
		return;

	shtx->tx_flags |= SKBTX_IN_PROGRESS;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	/*
	 * We have to poll for the Tx time stamp; the snapshot can take a
	 * moment to latch after the frame leaves the MAC.
	 */
	for (cnt = 0; cnt < 100; cnt++) {
		val = pch_ch_event_read(pdev);
		if (val & TX_SNAPSHOT_LOCKED)
			break;
		udelay(1);
	}
	if (!(val & TX_SNAPSHOT_LOCKED)) {
		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
		return;
	}

	ns = pch_tx_snap_read(pdev);

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);

	pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED);
}

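/**
 * hwtstamp_ioctl - handle the SIOCSHWTSTAMP ioctl
 * @netdev: Network interface device structure
 * @ifr:    User request containing a struct hwtstamp_config
 * @cmd:    ioctl command (unused)
 * Returns: 0 on success, negative errno on failure
 */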
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config cfg;
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev;
	u8 station[20];

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		adapter->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		adapter->hwts_rx_en = 0;
		pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
		strcpy(station, PTP_L4_MULTICAST_SA);
		pch_set_station_address(station, pdev);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
		strcpy(station, PTP_L2_MULTICAST_SA);
		pch_set_station_address(station, pdev);
		break;
	default:
		return -ERANGE;
	}

	adapter->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;

	/* Clear out any old time stamps. */
	pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

/**
 * pch_gbe_mac_load_mac_addr - Load MAC address into the hardware
 * @hw: Pointer to the HW structure
 */
static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
{
	iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
}

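/**
 * pch_gbe_mac_read_mac_addr - Read the MAC address from the hardware
 * @hw: Pointer to the HW structure
 * Returns: 0 (always successful)
 */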
static s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
{
	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
	u32 adr1a, adr1b;

	adr1a = ioread32(&hw->reg->mac_adr[0].high);
	adr1b = ioread32(&hw->reg->mac_adr[0].low);

	hw->mac.addr[0] = (u8)(adr1a & 0xFF);
	hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
	hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
	hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
	hw->mac.addr[4] = (u8)(adr1b & 0xFF);
	hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);

	netdev_dbg(adapter->netdev, "hw->mac.addr : %pM\n", hw->mac.addr);
	return 0;
}

/**
 * pch_gbe_wait_clr_bit - Wait for a bit to clear
 * @reg: Pointer of register
 * @bit: Busy bit
 */
static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
{
	u32 tmp;

	/* wait busy */
	tmp = 1000;
	while ((ioread32(reg) & bit) && --tmp)
		cpu_relax();
	if (!tmp)
		pr_err("Error: busy bit is not cleared\n");
}

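/**
 * pch_gbe_mac_mar_set - Set MAC address register
 * @hw:    Pointer to the HW structure
 * @addr:  Pointer to the MAC address
 * @index: MAC address array register
 */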
static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 *addr, u32 index)
{
	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
	u32 mar_low, mar_high, adrmask;

	netdev_dbg(adapter->netdev, "index : 0x%x\n", index);

	/*
	 * HW expects these in little endian, so reverse the byte order
	 * from network order (big endian) to little endian.
	 */
	mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
		    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
	/* Stop the MAC address of this index */
	adrmask = ioread32(&hw->reg->ADDR_MASK);
	iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
	/* Set the MAC address to the MAC address register */
	iowrite32(mar_high, &hw->reg->mac_adr[index].high);
	iowrite32(mar_low, &hw->reg->mac_adr[index].low);
	/* Start the MAC address of this index */
	iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}

/**
 * pch_gbe_mac_reset_hw - Reset hardware
 * @hw: Pointer to the HW structure
 */
static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
{
	/* Read the MAC address, and store it in the private data */
	pch_gbe_mac_read_mac_addr(hw);
	iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
	iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
	pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
	/* Setup the MAC address */
	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
}

static void pch_gbe_disable_mac_rx(struct pch_gbe_hw *hw)
{
	u32 rctl;
	/* Disables Receive MAC */
	rctl = ioread32(&hw->reg->MAC_RX_EN);
	iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
}

static void pch_gbe_enable_mac_rx(struct pch_gbe_hw *hw)
{
	u32 rctl;
	/* Enables Receive MAC */
	rctl = ioread32(&hw->reg->MAC_RX_EN);
	iowrite32((rctl | PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
}

/**
 * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
 * @hw:        Pointer to the HW structure
 * @mar_count: Receive address registers
 */
static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
{
	u32 i;

	/* Setup the receive address */
	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);

	/* Zero out the other receive addresses */
	for (i = 1; i < mar_count; i++) {
		iowrite32(0, &hw->reg->mac_adr[i].high);
		iowrite32(0, &hw->reg->mac_adr[i].low);
	}
	iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}

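/**
 * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
 * @hw: Pointer to the HW structure
 * Returns: 0 on success, negative on an invalid flow control mode
 */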
s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
{
	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
	struct pch_gbe_mac_info *mac = &hw->mac;
	u32 rx_fctrl;

	netdev_dbg(adapter->netdev, "mac->fc = %u\n", mac->fc);

	rx_fctrl = ioread32(&hw->reg->RX_FCTRL);

	switch (mac->fc) {
	case PCH_GBE_FC_NONE:
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = false;
		break;
	case PCH_GBE_FC_RX_PAUSE:
		rx_fctrl |= PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = false;
		break;
	case PCH_GBE_FC_TX_PAUSE:
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = true;
		break;
	case PCH_GBE_FC_FULL:
		rx_fctrl |= PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = true;
		break;
	default:
		netdev_err(adapter->netdev,
			   "Flow control param set incorrectly\n");
		return -EINVAL;
	}
	if (mac->link_duplex == DUPLEX_HALF)
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
	iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
	netdev_dbg(adapter->netdev,
		   "RX_FCTRL reg : 0x%08x mac->tx_fc_enable : %d\n",
		   ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
	return 0;
}

/**
 * pch_gbe_mac_set_wol_event - Set wake-on-lan event
 * @hw:     Pointer to the HW structure
 * @wu_evt: Wake up event
 */
static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
{
	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
	u32 addr_mask;

	netdev_dbg(adapter->netdev, "wu_evt : 0x%08x ADDR_MASK reg : 0x%08x\n",
		   wu_evt, ioread32(&hw->reg->ADDR_MASK));

	if (wu_evt) {
		/* Set Wake-On-Lan address mask */
		addr_mask = ioread32(&hw->reg->ADDR_MASK);
		iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
		/* wait busy */
		pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
		iowrite32(0, &hw->reg->WOL_ST);
		iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
		iowrite32(0x02, &hw->reg->TCPIP_ACC);
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	} else {
		iowrite32(0, &hw->reg->WOL_CTRL);
		iowrite32(0, &hw->reg->WOL_ST);
	}
}

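/**
 * pch_gbe_mac_ctrl_miim - Control the MIIM interface
 * @hw:   Pointer to the HW structure
 * @addr: Address of PHY
 * @dir:  Operating direction (read or write)
 * @reg:  Access register of PHY
 * @data: Write data
 * Returns: The data read from the PHY on a read, or the data written
 */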
u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
			  u16 data)
{
	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&hw->miim_lock, flags);

	for (i = 100; i; --i) {
		if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
			break;
		udelay(20);
	}
	if (i == 0) {
		netdev_err(adapter->netdev, "pch-gbe.miim won't go Ready\n");
		spin_unlock_irqrestore(&hw->miim_lock, flags);
		return 0;	/* No way to indicate timeout error */
	}
	iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
		   (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
		   dir | data), &hw->reg->MIIM);
	for (i = 0; i < 100; i++) {
		udelay(20);
		data_out = ioread32(&hw->reg->MIIM);
		if ((data_out & PCH_GBE_MIIM_OPER_READY))
			break;
	}
	spin_unlock_irqrestore(&hw->miim_lock, flags);

	netdev_dbg(adapter->netdev, "PHY %s: reg=%d, data=0x%04X\n",
		   dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
		   dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
	return (u16) data_out;
}

/**
 * pch_gbe_mac_set_pause_packet - Set pause packet
 * @hw: Pointer to the HW structure
 */
static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
{
	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
	unsigned long tmp2, tmp3;

	/* Set Pause packet */
	tmp2 = hw->mac.addr[1];
	tmp2 = (tmp2 << 8) | hw->mac.addr[0];
	tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);

	tmp3 = hw->mac.addr[5];
	tmp3 = (tmp3 << 8) | hw->mac.addr[4];
	tmp3 = (tmp3 << 8) | hw->mac.addr[3];
	tmp3 = (tmp3 << 8) | hw->mac.addr[2];

	iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
	iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
	iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
	iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
	iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);

	/* Transmit Pause Packet */
	iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);

	netdev_dbg(adapter->netdev,
		   "PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
		   ioread32(&hw->reg->PAUSE_PKT1),
		   ioread32(&hw->reg->PAUSE_PKT2),
		   ioread32(&hw->reg->PAUSE_PKT3),
		   ioread32(&hw->reg->PAUSE_PKT4),
		   ioread32(&hw->reg->PAUSE_PKT5));
}

/**
 * pch_gbe_alloc_queues - Allocate memory for all rings
 * @adapter: Board private structure to initialize
 * Returns: 0 on success, negative on failure
 */
static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
{
	adapter->tx_ring = devm_kzalloc(&adapter->pdev->dev,
					sizeof(*adapter->tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = devm_kzalloc(&adapter->pdev->dev,
					sizeof(*adapter->rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		return -ENOMEM;
	return 0;
}

/**
 * pch_gbe_init_stats - Initialize status
 * @adapter: Board private structure to initialize
 */
static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
{
	memset(&adapter->stats, 0, sizeof(adapter->stats));
}

/**
 * pch_gbe_init_phy - Initialize PHY
 * @adapter: Board private structure to initialize
 * Returns: 0 on success, negative on failure
 */
static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 addr;
	u16 bmcr, stat;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
		adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
		stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
		stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	adapter->hw.phy.addr = adapter->mii.phy_id;
	netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id);
	if (addr == PCH_GBE_PHY_REGS_LEN)
		return -EAGAIN;
	/* Selected the phy and isolate the rest */
	for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
		if (addr != adapter->mii.phy_id) {
			pch_gbe_mdio_write(netdev, addr, MII_BMCR,
					   BMCR_ISOLATE);
		} else {
			bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
			pch_gbe_mdio_write(netdev, addr, MII_BMCR,
					   bmcr & ~BMCR_ISOLATE);
		}
	}

	/* MII setup */
	adapter->mii.phy_id_mask = 0x1F;
	adapter->mii.reg_num_mask = 0x1F;
	adapter->mii.dev = adapter->netdev;
	adapter->mii.mdio_read = pch_gbe_mdio_read;
	adapter->mii.mdio_write = pch_gbe_mdio_write;
	adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
	return 0;
}

/**
 * pch_gbe_mdio_read - The read function for mii
 * @netdev: Network interface device structure
 * @addr:   PHY address
 * @reg:    Access register number
 * Returns: The value read from the PHY register
 */
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
				     (u16) 0);
}

/**
 * pch_gbe_mdio_write - The write function for mii
 * @netdev: Network interface device structure
 * @addr:   PHY address
 * @reg:    Access register number
 * @data:   Value to be written
 */
static void pch_gbe_mdio_write(struct net_device *netdev,
			       int addr, int reg, int data)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
}

/**
 * pch_gbe_reset_task - Reset processing at the time of transmission timeout
 * @work: Pointer of board private structure
 */
static void pch_gbe_reset_task(struct work_struct *work)
{
	struct pch_gbe_adapter *adapter;
	adapter = container_of(work, struct pch_gbe_adapter, reset_task);

	rtnl_lock();
	pch_gbe_reinit_locked(adapter);
	rtnl_unlock();
}

/**
 * pch_gbe_reinit_locked - Re-initialization
 * @adapter: Board private structure
 */
void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
{
	pch_gbe_down(adapter);
	pch_gbe_up(adapter);
}

/**
 * pch_gbe_reset - Reset GbE
 * @adapter: Board private structure
 */
void pch_gbe_reset(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_hw *hw = &adapter->hw;
	s32 ret_val;

	pch_gbe_mac_reset_hw(hw);
	/* reprogram multicast address register after reset */
	pch_gbe_set_multi(netdev);
	/* Setup the receive address */
	pch_gbe_mac_init_rx_addrs(hw, PCH_GBE_MAR_ENTRIES);

	ret_val = pch_gbe_phy_get_id(hw);
	if (ret_val) {
		netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n");
		return;
	}
	pch_gbe_phy_init_setting(hw);
	/* Setup Mac interface option RGMII */
	pch_gbe_phy_set_rgmii(hw);
}

/**
 * pch_gbe_free_irq - Free an interrupt
 * @adapter: Board private structure
 */
static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->irq, netdev);
	pci_free_irq_vectors(adapter->pdev);
}

/**
 * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: Board private structure
 */
static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	atomic_inc(&adapter->irq_sem);
	iowrite32(0, &hw->reg->INT_EN);
	ioread32(&hw->reg->INT_ST);
	synchronize_irq(adapter->irq);

	netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n",
		   ioread32(&hw->reg->INT_EN));
}

/**
 * pch_gbe_irq_enable - Enable default interrupt generation settings
 * @adapter: Board private structure
 */
static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	if (likely(atomic_dec_and_test(&adapter->irq_sem)))
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	ioread32(&hw->reg->INT_ST);
	netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n",
		   ioread32(&hw->reg->INT_EN));
}

/**
 * pch_gbe_setup_tctl - configure the Transmit control registers
 * @adapter: Board private structure
 */
static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 tx_mode, tcpip;

	tx_mode = PCH_GBE_TM_LONG_PKT |
		PCH_GBE_TM_ST_AND_FD |
		PCH_GBE_TM_SHORT_PKT |
		PCH_GBE_TM_TH_TX_STRT_8 |
		PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;

	iowrite32(tx_mode, &hw->reg->TX_MODE);

	tcpip = ioread32(&hw->reg->TCPIP_ACC);
	tcpip |= PCH_GBE_TX_TCPIPACC_EN;
	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
}

/**
 * pch_gbe_configure_tx - Configure Transmit Unit after Reset
 * @adapter: Board private structure
 */
static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 tdba, tdlen, dctrl;

	netdev_dbg(adapter->netdev, "dma addr = 0x%08llx size = 0x%08x\n",
		   (unsigned long long)adapter->tx_ring->dma,
		   adapter->tx_ring->size);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	tdba = adapter->tx_ring->dma;
	tdlen = adapter->tx_ring->size - 0x10;
	iowrite32(tdba, &hw->reg->TX_DSC_BASE);
	iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
	iowrite32(tdba, &hw->reg->TX_DSC_SW_P);

	/* Enables Transmission DMA */
	dctrl = ioread32(&hw->reg->DMA_CTRL);
	dctrl |= PCH_GBE_TX_DMA_EN;
	iowrite32(dctrl, &hw->reg->DMA_CTRL);
}

/**
 * pch_gbe_setup_rctl - Configure the receive control registers
 * @adapter: Board private structure
 */
static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rx_mode, tcpip;

	rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
	PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;

	iowrite32(rx_mode, &hw->reg->RX_MODE);

	tcpip = ioread32(&hw->reg->TCPIP_ACC);

	tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
	tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
}

/**
 * pch_gbe_configure_rx - Configure Receive Unit after Reset
 * @adapter: Board private structure
 */
static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rdba, rdlen, rxdma;

	netdev_dbg(adapter->netdev, "dma adr = 0x%08llx size = 0x%08x\n",
		   (unsigned long long)adapter->rx_ring->dma,
		   adapter->rx_ring->size);

	pch_gbe_mac_force_mac_fc(hw);

	pch_gbe_disable_mac_rx(hw);

	/* Disables Receive DMA */
	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma &= ~PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);

	netdev_dbg(adapter->netdev,
		   "MAC_RX_EN reg = 0x%08x DMA_CTRL reg = 0x%08x\n",
		   ioread32(&hw->reg->MAC_RX_EN),
		   ioread32(&hw->reg->DMA_CTRL));

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	rdba = adapter->rx_ring->dma;
	rdlen = adapter->rx_ring->size - 0x10;
	iowrite32(rdba, &hw->reg->RX_DSC_BASE);
	iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
	iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
}

/**
 * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
 * @adapter:     Board private structure
 * @buffer_info: Buffer information structure
 */
static void pch_gbe_unmap_and_free_tx_resource(
	struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
{
	if (buffer_info->mapped) {
		dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->mapped = false;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}

/**
 * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
 * @adapter:     Board private structure
 * @buffer_info: Buffer information structure
 */
static void pch_gbe_unmap_and_free_rx_resource(
					struct pch_gbe_adapter *adapter,
					struct pch_gbe_buffer *buffer_info)
{
	if (buffer_info->mapped) {
		dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->mapped = false;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}

/**
 * pch_gbe_clean_tx_ring - Free Tx Buffers
 * @adapter: Board private structure
 * @tx_ring: Ring to be cleaned
 */
static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
				  struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
	}
	netdev_dbg(adapter->netdev,
		   "call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);

	size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
	iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
}

/**
 * pch_gbe_clean_rx_ring - Free Rx Buffers
 * @adapter: Board private structure
 * @rx_ring: Ring to free buffers from
 */
static void
pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
		      struct pch_gbe_rx_ring *rx_ring)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
	}
	netdev_dbg(adapter->netdev,
		   "call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
	size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
	iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
}
984
985static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
986 u16 duplex)
987{
988 struct pch_gbe_hw *hw = &adapter->hw;
989 unsigned long rgmii = 0;
990
991
992 switch (speed) {
993 case SPEED_10:
994 rgmii = (PCH_GBE_RGMII_RATE_2_5M |
995 PCH_GBE_MAC_RGMII_CTRL_SETTING);
996 break;
997 case SPEED_100:
998 rgmii = (PCH_GBE_RGMII_RATE_25M |
999 PCH_GBE_MAC_RGMII_CTRL_SETTING);
1000 break;
1001 case SPEED_1000:
1002 rgmii = (PCH_GBE_RGMII_RATE_125M |
1003 PCH_GBE_MAC_RGMII_CTRL_SETTING);
1004 break;
1005 }
1006 iowrite32(rgmii, &hw->reg->RGMII_CTRL);
1007}
1008static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
1009 u16 duplex)
1010{
1011 struct net_device *netdev = adapter->netdev;
1012 struct pch_gbe_hw *hw = &adapter->hw;
1013 unsigned long mode = 0;
1014
1015
1016 switch (speed) {
1017 case SPEED_10:
1018 mode = PCH_GBE_MODE_MII_ETHER;
1019 netdev->tx_queue_len = 10;
1020 break;
1021 case SPEED_100:
1022 mode = PCH_GBE_MODE_MII_ETHER;
1023 netdev->tx_queue_len = 100;
1024 break;
1025 case SPEED_1000:
1026 mode = PCH_GBE_MODE_GMII_ETHER;
1027 break;
1028 }
1029 if (duplex == DUPLEX_FULL)
1030 mode |= PCH_GBE_MODE_FULL_DUPLEX;
1031 else
1032 mode |= PCH_GBE_MODE_HALF_DUPLEX;
1033 iowrite32(mode, &hw->reg->MODE);
1034}

/**
 * pch_gbe_watchdog - Watchdog process
 * @t: timer list containing a Board private structure
 */
static void pch_gbe_watchdog(struct timer_list *t)
{
	struct pch_gbe_adapter *adapter = from_timer(adapter, t,
						     watchdog_timer);
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_hw *hw = &adapter->hw;

	netdev_dbg(netdev, "right now = %ld\n", jiffies);

	pch_gbe_update_stats(adapter);
	if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
		struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
		netdev->tx_queue_len = adapter->tx_queue_len;
		/* mii library handles link maintenance tasks */
		if (mii_ethtool_gset(&adapter->mii, &cmd)) {
			netdev_err(netdev, "ethtool get setting Error\n");
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies +
						PCH_GBE_WATCHDOG_PERIOD));
			return;
		}
		hw->mac.link_speed = ethtool_cmd_speed(&cmd);
		hw->mac.link_duplex = cmd.duplex;
		/* Set the RGMII control. */
		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
				       hw->mac.link_duplex);
		/* Set the communication mode */
		pch_gbe_set_mode(adapter, hw->mac.link_speed,
				 hw->mac.link_duplex);
		netdev_dbg(netdev,
			   "Link is Up %d Mbps %s-Duplex\n",
			   hw->mac.link_speed,
			   cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
		netif_carrier_on(netdev);
		netif_wake_queue(netdev);
	} else if ((!mii_link_ok(&adapter->mii)) &&
		   (netif_carrier_ok(netdev))) {
		netdev_dbg(netdev, "NIC Link is Down\n");
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_HALF;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}
	mod_timer(&adapter->watchdog_timer,
		  round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
}

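/**
 * pch_gbe_tx_queue - Carry out queuing of the transmission data
 * @adapter: Board private structure
 * @tx_ring: Tx descriptor ring structure
 * @skb:     Socket buffer structure to transmit
 */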
static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
			     struct pch_gbe_tx_ring *tx_ring,
			     struct sk_buff *skb)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_tx_desc *tx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *tmp_skb;
	unsigned int frame_ctrl;
	unsigned int ring_num;

	/*-- Set frame control --*/
	frame_ctrl = 0;
	if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
		frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
	if (skb->ip_summed == CHECKSUM_NONE)
		frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;

	/* Performs checksum processing in software, because the hardware
	 * accelerator does not support a checksum when the frame is
	 * shorter than 64 bytes.
	 */
	if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
		frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
			      PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			unsigned int offset;
			offset = skb_transport_offset(skb);
			if (iph->protocol == IPPROTO_TCP) {
				skb->csum = 0;
				tcp_hdr(skb)->check = 0;
				skb->csum = skb_checksum(skb, offset,
							 skb->len - offset, 0);
				tcp_hdr(skb)->check =
					csum_tcpudp_magic(iph->saddr,
							  iph->daddr,
							  skb->len - offset,
							  IPPROTO_TCP,
							  skb->csum);
			} else if (iph->protocol == IPPROTO_UDP) {
				skb->csum = 0;
				udp_hdr(skb)->check = 0;
				skb->csum =
					skb_checksum(skb, offset,
						     skb->len - offset, 0);
				udp_hdr(skb)->check =
					csum_tcpudp_magic(iph->saddr,
							  iph->daddr,
							  skb->len - offset,
							  IPPROTO_UDP,
							  skb->csum);
			}
		}
	}

	ring_num = tx_ring->next_to_use;
	if (unlikely((ring_num + 1) == tx_ring->count))
		tx_ring->next_to_use = 0;
	else
		tx_ring->next_to_use = ring_num + 1;

	buffer_info = &tx_ring->buffer_info[ring_num];
	tmp_skb = buffer_info->skb;

	/* [Header:14][payload] ---> [Header:14][padding:2][payload] */
	memcpy(tmp_skb->data, skb->data, ETH_HLEN);
	tmp_skb->data[ETH_HLEN] = 0x00;
	tmp_skb->data[ETH_HLEN + 1] = 0x00;
	tmp_skb->len = skb->len;
	memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
	       (skb->len - ETH_HLEN));
	/*-- Set Buffer information --*/
	buffer_info->length = tmp_skb->len;
	buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
					  buffer_info->length,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
		netdev_err(adapter->netdev, "TX DMA map failed\n");
		buffer_info->dma = 0;
		buffer_info->time_stamp = 0;
		tx_ring->next_to_use = ring_num;
		return;
	}
	buffer_info->mapped = true;
	buffer_info->time_stamp = jiffies;

	/*-- Set Tx descriptor --*/
	tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
	tx_desc->buffer_addr = (buffer_info->dma);
	tx_desc->length = (tmp_skb->len);
	tx_desc->tx_words_eob = ((tmp_skb->len + 3));
	tx_desc->tx_frame_ctrl = (frame_ctrl);
	tx_desc->gbec_status = (DSC_INIT16);

	if (unlikely(++ring_num == tx_ring->count))
		ring_num = 0;

	/* Update software pointer of TX descriptor */
	iowrite32(tx_ring->dma +
		  (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
		  &hw->reg->TX_DSC_SW_P);

	pch_tx_timestamp(adapter, skb);

	dev_kfree_skb_any(skb);
}

/**
 * pch_gbe_update_stats - Update the board statistics counters
 * @adapter: Board private structure
 */
void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_hw_stats *stats = &adapter->stats;
	unsigned long flags;

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
		return;

	spin_lock_irqsave(&adapter->stats_lock, flags);

	/* Update device status "adapter->stats" */
	stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
	stats->tx_errors = stats->tx_length_errors +
	    stats->tx_aborted_errors +
	    stats->tx_carrier_errors + stats->tx_timeout_count;

	/* Update network device status "netdev->stats" */
	netdev->stats.rx_packets = stats->rx_packets;
	netdev->stats.rx_bytes = stats->rx_bytes;
	netdev->stats.rx_dropped = stats->rx_dropped;
	netdev->stats.tx_packets = stats->tx_packets;
	netdev->stats.tx_bytes = stats->tx_bytes;
	netdev->stats.tx_dropped = stats->tx_dropped;
	/* Fill out the OS statistics structure */
	netdev->stats.multicast = stats->multicast;
	netdev->stats.collisions = stats->collisions;
	/* Rx Errors */
	netdev->stats.rx_errors = stats->rx_errors;
	netdev->stats.rx_crc_errors = stats->rx_crc_errors;
	netdev->stats.rx_frame_errors = stats->rx_frame_errors;
	/* Tx Errors */
	netdev->stats.tx_errors = stats->tx_errors;
	netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
	netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}

static void pch_gbe_disable_dma_rx(struct pch_gbe_hw *hw)
{
	u32 rxdma;

	/* Disable Receive DMA */
	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma &= ~PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);
}

static void pch_gbe_enable_dma_rx(struct pch_gbe_hw *hw)
{
	u32 rxdma;

	/* Enable Receive DMA */
	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma |= PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);
}

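/**
 * pch_gbe_intr - Interrupt Handler
 * @irq:  Interrupt number
 * @data: Pointer to a network interface device structure
 * Returns: IRQ_HANDLED if the interrupt was ours, otherwise IRQ_NONE
 */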
static irqreturn_t pch_gbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 int_st;
	u32 int_en;

	/* Check request status */
	int_st = ioread32(&hw->reg->INT_ST);
	int_st = int_st & ioread32(&hw->reg->INT_EN);
	/* When request status is no interruption factor */
	if (unlikely(!int_st))
		return IRQ_NONE;	/* Not our interrupt. End processing. */
	netdev_dbg(netdev, "%s occur int_st = 0x%08x\n", __func__, int_st);
	if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
		adapter->stats.intr_rx_frame_err_count++;
	if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
		if (!adapter->rx_stop_flag) {
			adapter->stats.intr_rx_fifo_err_count++;
			netdev_dbg(netdev, "Rx fifo over run\n");
			adapter->rx_stop_flag = true;
			int_en = ioread32(&hw->reg->INT_EN);
			iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
				  &hw->reg->INT_EN);
			pch_gbe_disable_dma_rx(&adapter->hw);
			int_st |= ioread32(&hw->reg->INT_ST);
			int_st = int_st & ioread32(&hw->reg->INT_EN);
		}
	if (int_st & PCH_GBE_INT_RX_DMA_ERR)
		adapter->stats.intr_rx_dma_err_count++;
	if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
		adapter->stats.intr_tx_fifo_err_count++;
	if (int_st & PCH_GBE_INT_TX_DMA_ERR)
		adapter->stats.intr_tx_dma_err_count++;
	if (int_st & PCH_GBE_INT_TCPIP_ERR)
		adapter->stats.intr_tcpip_err_count++;
	/* When Rx descriptor is empty */
	if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
		adapter->stats.intr_rx_dsc_empty_count++;
		netdev_dbg(netdev, "Rx descriptor is empty\n");
		int_en = ioread32(&hw->reg->INT_EN);
		iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
		if (hw->mac.tx_fc_enable) {
			/* Set Pause packet */
			pch_gbe_mac_set_pause_packet(hw);
		}
	}

	/* When request status is Receive interruption */
	if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
	    (adapter->rx_stop_flag)) {
		if (likely(napi_schedule_prep(&adapter->napi))) {
			/* Enable only Rx Descriptor empty */
			atomic_inc(&adapter->irq_sem);
			int_en = ioread32(&hw->reg->INT_EN);
			int_en &=
			    ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
			iowrite32(int_en, &hw->reg->INT_EN);
			/* Start polling for NAPI */
			__napi_schedule(&adapter->napi);
		}
	}
	netdev_dbg(netdev, "return = 0x%08x INT_EN reg = 0x%08x\n",
		   IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
	return IRQ_HANDLED;
}

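/**
 * pch_gbe_alloc_rx_buffers - Replace used receive buffers
 * @adapter:       Board private structure
 * @rx_ring:       Rx descriptor ring
 * @cleaned_count: Cleaned count
 */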
static void
pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
			 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_rx_desc *rx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz;

	bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
	i = rx_ring->next_to_use;

	while ((cleaned_count--)) {
		buffer_info = &rx_ring->buffer_info[i];
		skb = netdev_alloc_skb(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->stats.rx_alloc_buff_failed++;
			break;
		}
		/* align */
		skb_reserve(skb, NET_IP_ALIGN);
		buffer_info->skb = skb;

		buffer_info->dma = dma_map_single(&pdev->dev,
						  buffer_info->rx_buffer,
						  buffer_info->length,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
			buffer_info->dma = 0;
			adapter->stats.rx_alloc_buff_failed++;
			break; /* while !buffer_info->skb */
		}
		buffer_info->mapped = true;
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = (buffer_info->dma);
		rx_desc->gbec_status = DSC_INIT16;

		netdev_dbg(netdev,
			   "i = %d buffer_info->dma = 0x08%llx buffer_info->length = 0x%x\n",
			   i, (unsigned long long)buffer_info->dma,
			   buffer_info->length);

		if (unlikely(++i == rx_ring->count))
			i = 0;
	}
	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);
		iowrite32(rx_ring->dma +
			  (int)sizeof(struct pch_gbe_rx_desc) * i,
			  &hw->reg->RX_DSC_SW_P);
	}
}

static int
pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
			      struct pch_gbe_rx_ring *rx_ring,
			      int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_buffer *buffer_info;
	unsigned int i;
	unsigned int bufsz;
	unsigned int size;

	bufsz = adapter->rx_buffer_len;

	size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
	rx_ring->rx_buff_pool =
		dma_alloc_coherent(&pdev->dev, size,
				   &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
	if (!rx_ring->rx_buff_pool)
		return -ENOMEM;

	rx_ring->rx_buff_pool_size = size;
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
		buffer_info->length = bufsz;
	}
	return 0;
}

/**
 * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
 * @adapter: Board private structure
 * @tx_ring: Tx descriptor ring
 */
static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
				     struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz;
	struct pch_gbe_tx_desc *tx_desc;

	bufsz =
	    adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		skb = netdev_alloc_skb(adapter->netdev, bufsz);
		skb_reserve(skb, PCH_GBE_DMA_ALIGN);
		buffer_info->skb = skb;
		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
		tx_desc->gbec_status = (DSC_INIT16);
	}
}

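/**
 * pch_gbe_clean_tx - Reclaim resources after transmit completes
 * @adapter: Board private structure
 * @tx_ring: Tx descriptor ring
 * Returns: true if descriptors were cleaned within the weight budget,
 *          otherwise false
 */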
static bool
pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
		 struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_tx_desc *tx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int cleaned_count = 0;
	bool cleaned = false;
	int unused, thresh;

	netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
		   tx_ring->next_to_clean);

	i = tx_ring->next_to_clean;
	tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
	netdev_dbg(adapter->netdev, "gbec_status:0x%04x dma_status:0x%04x\n",
		   tx_desc->gbec_status, tx_desc->dma_status);

	unused = PCH_GBE_DESC_UNUSED(tx_ring);
	thresh = tx_ring->count - PCH_GBE_TX_WEIGHT;
	if ((tx_desc->gbec_status == DSC_INIT16) && (unused < thresh))
	{  /* current marked clean, tx queue filling up, do extra clean */
		int j, k;
		if (unused < 8) {  /* tx queue nearly full */
			netdev_dbg(adapter->netdev,
				   "clean_tx: transmit queue warning (%x,%x) unused=%d\n",
				   tx_ring->next_to_clean, tx_ring->next_to_use,
				   unused);
		}

		/* current marked clean, scan for more that need cleaning. */
		k = i;
		for (j = 0; j < PCH_GBE_TX_WEIGHT; j++)
		{
			tx_desc = PCH_GBE_TX_DESC(*tx_ring, k);
			if (tx_desc->gbec_status != DSC_INIT16) break; /*found*/
			if (++k >= tx_ring->count) k = 0; /* wrap around */
		}
		if (j < PCH_GBE_TX_WEIGHT) {
			netdev_dbg(adapter->netdev,
				   "clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n",
				   unused, j, i, k, tx_ring->next_to_use,
				   tx_desc->gbec_status);
			i = k;  /* step to the marked descriptor */
		}
	}

	while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
		netdev_dbg(adapter->netdev, "gbec_status:0x%04x\n",
			   tx_desc->gbec_status);
		buffer_info = &tx_ring->buffer_info[i];
		skb = buffer_info->skb;
		cleaned = true;

		if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
			adapter->stats.tx_aborted_errors++;
			netdev_err(adapter->netdev, "Transfer Abort Error\n");
		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
			  ) {
			adapter->stats.tx_carrier_errors++;
			netdev_err(adapter->netdev,
				   "Transfer Carrier Sense Error\n");
		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
			  ) {
			adapter->stats.tx_aborted_errors++;
			netdev_err(adapter->netdev,
				   "Transfer Collision Abort Error\n");
		} else if ((tx_desc->gbec_status &
			    (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
			     PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
			adapter->stats.collisions++;
			adapter->stats.tx_packets++;
			adapter->stats.tx_bytes += skb->len;
			netdev_dbg(adapter->netdev, "Transfer Collision\n");
		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
			  ) {
			adapter->stats.tx_packets++;
			adapter->stats.tx_bytes += skb->len;
		}
		if (buffer_info->mapped) {
			netdev_dbg(adapter->netdev,
				   "unmap buffer_info->dma : %d\n", i);
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
			buffer_info->mapped = false;
		}
		if (buffer_info->skb) {
			netdev_dbg(adapter->netdev,
				   "trim buffer_info->skb : %d\n", i);
			skb_trim(buffer_info->skb, 0);
		}
		tx_desc->gbec_status = DSC_INIT16;
		if (unlikely(++i == tx_ring->count))
			i = 0;
		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);

		/* weight of a sort for tx, avoid endless transmit cleanup */
		if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
			cleaned = false;
			break;
		}
	}
	netdev_dbg(adapter->netdev,
		   "called pch_gbe_unmap_and_free_tx_resource() %d count\n",
		   cleaned_count);
	if (cleaned_count > 0) {  /* Found a descriptor to clean */
		/* Recover from running out of Tx resources in xmit_frame */
		netif_tx_lock(adapter->netdev);
		if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev))))
		{
			netif_wake_queue(adapter->netdev);
			adapter->stats.tx_restart_count++;
			netdev_dbg(adapter->netdev, "Tx wake queue\n");
		}

		tx_ring->next_to_clean = i;

		netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
			   tx_ring->next_to_clean);
		netif_tx_unlock(adapter->netdev);
	}
	return cleaned;
}

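/**
 * pch_gbe_clean_rx - Send received data up the network stack
 * @adapter:    Board private structure
 * @rx_ring:    Rx descriptor ring
 * @work_done:  Completed count
 * @work_to_do: Request count
 * Returns: true if descriptors were cleaned, otherwise false
 */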
static bool
pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
		 struct pch_gbe_rx_ring *rx_ring,
		 int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_buffer *buffer_info;
	struct pch_gbe_rx_desc *rx_desc;
	u32 length;
	unsigned int i;
	unsigned int cleaned_count = 0;
	bool cleaned = false;
	struct sk_buff *skb;
	u8 dma_status;
	u16 gbec_status;
	u32 tcp_ip_status;

	i = rx_ring->next_to_clean;

	while (*work_done < work_to_do) {
		/* Check Rx descriptor status */
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
		if (rx_desc->gbec_status == DSC_INIT16)
			break;
		cleaned = true;
		cleaned_count++;

		dma_status = rx_desc->dma_status;
		gbec_status = rx_desc->gbec_status;
		tcp_ip_status = rx_desc->tcp_ip_status;
		rx_desc->gbec_status = DSC_INIT16;
		buffer_info = &rx_ring->buffer_info[i];
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		/* unmap skb */
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->mapped = false;

		netdev_dbg(netdev,
			   "RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x TCP:0x%08x] BufInf = 0x%p\n",
			   i, dma_status, gbec_status, tcp_ip_status,
			   buffer_info);
		/* Error check */
		if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
			adapter->stats.rx_frame_errors++;
			netdev_err(netdev, "Receive Not Octal Error\n");
		} else if (unlikely(gbec_status &
				    PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
			adapter->stats.rx_frame_errors++;
			netdev_err(netdev, "Receive Nibble Error\n");
		} else if (unlikely(gbec_status &
				    PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
			adapter->stats.rx_crc_errors++;
			netdev_err(netdev, "Receive CRC Error\n");
		} else {
			/* get receive length; length convert(-3),
			 * length includes FCS length */
			length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
			if (rx_desc->rx_words_eob & 0x02)
				length = length - 4;
			/*
			 * buffer_info->rx_buffer: [Header:14][payload]
			 * skb->data: [Reserve:2][Header:14][payload]
			 */
			memcpy(skb->data, buffer_info->rx_buffer, length);

			/* update status of driver */
			adapter->stats.rx_bytes += length;
			adapter->stats.rx_packets++;
			if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
				adapter->stats.multicast++;
			/* Write metadata of skb */
			skb_put(skb, length);

			pch_rx_timestamp(adapter, skb);

			skb->protocol = eth_type_trans(skb, netdev);
			if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				skb->ip_summed = CHECKSUM_NONE;

			napi_gro_receive(&adapter->napi, skb);
			(*work_done)++;
			netdev_dbg(netdev,
				   "Receive skb->ip_summed: %d length: %d\n",
				   skb->ip_summed, length);
		}
		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
			pch_gbe_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}
		if (++i == rx_ring->count)
			i = 0;
	}
	rx_ring->next_to_clean = i;
	if (cleaned_count)
		pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
	return cleaned;
}

/**
 * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
 * @adapter: Board private structure
 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
 * Returns: 0 on success, negative on failure
 */
int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
			       struct pch_gbe_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_tx_desc *tx_desc;
	int size;
	int desNo;

	size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info)
		return -ENOMEM;

	tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		vfree(tx_ring->buffer_info);
		return -ENOMEM;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	for (desNo = 0; desNo < tx_ring->count; desNo++) {
		tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
		tx_desc->gbec_status = DSC_INIT16;
	}
	netdev_dbg(adapter->netdev,
		   "tx_ring->desc = 0x%p tx_ring->dma = 0x%08llx next_to_clean = 0x%08x next_to_use = 0x%08x\n",
		   tx_ring->desc, (unsigned long long)tx_ring->dma,
		   tx_ring->next_to_clean, tx_ring->next_to_use);
	return 0;
}

/**
 * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
 * @adapter: Board private structure
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 * Returns: 0 on success, negative on failure
 */
int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
			       struct pch_gbe_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_rx_desc *rx_desc;
	int size;
	int desNo;

	size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		return -ENOMEM;

	rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc) {
		vfree(rx_ring->buffer_info);
		return -ENOMEM;
	}
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	for (desNo = 0; desNo < rx_ring->count; desNo++) {
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
		rx_desc->gbec_status = DSC_INIT16;
	}
	netdev_dbg(adapter->netdev,
		   "rx_ring->desc = 0x%p rx_ring->dma = 0x%08llx next_to_clean = 0x%08x next_to_use = 0x%08x\n",
		   rx_ring->desc, (unsigned long long)rx_ring->dma,
		   rx_ring->next_to_clean, rx_ring->next_to_use);
	return 0;
}

/**
 * pch_gbe_free_tx_resources - Free Tx Resources
 * @adapter: Board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 */
void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
			       struct pch_gbe_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	pch_gbe_clean_tx_ring(adapter, tx_ring);
	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;
	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);
	tx_ring->desc = NULL;
}

/**
 * pch_gbe_free_rx_resources - Free Rx Resources
 * @adapter: Board private structure
 * @rx_ring: Ring to clean the resources from
 */
void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
			       struct pch_gbe_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	pch_gbe_clean_rx_ring(adapter, rx_ring);
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
	rx_ring->desc = NULL;
}

/**
 * pch_gbe_request_irq - Allocate an interrupt line
 * @adapter: Board private structure
 * Returns: 0 on success, negative on failure
 */
static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	err = pci_alloc_irq_vectors(adapter->pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (err < 0)
		return err;

	adapter->irq = pci_irq_vector(adapter->pdev, 0);

	err = request_irq(adapter->irq, &pch_gbe_intr, IRQF_SHARED,
			  netdev->name, netdev);
	if (err)
		netdev_err(netdev, "Unable to allocate interrupt Error: %d\n",
			   err);
	netdev_dbg(netdev, "have_msi : %d return : 0x%04x\n",
		   pci_dev_msi_enabled(adapter->pdev), err);
	return err;
}

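/**
 * pch_gbe_up - Up GbE network device
 * @adapter: Board private structure
 * Returns: 0 on success, negative on failure
 */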
int pch_gbe_up(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
	int err = -EINVAL;

	/* Ensure we have a valid MAC */
	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
		netdev_err(netdev, "Error: Invalid MAC address\n");
		goto out;
	}

	/* hardware has been reset, we need to reload some things */
	pch_gbe_set_multi(netdev);

	pch_gbe_setup_tctl(adapter);
	pch_gbe_configure_tx(adapter);
	pch_gbe_setup_rctl(adapter);
	pch_gbe_configure_rx(adapter);

	err = pch_gbe_request_irq(adapter);
	if (err) {
		netdev_err(netdev,
			   "Error: can't bring device up - irq request failed\n");
		goto out;
	}
	err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
	if (err) {
		netdev_err(netdev,
			   "Error: can't bring device up - alloc rx buffers pool failed\n");
		goto freeirq;
	}
	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
	adapter->tx_queue_len = netdev->tx_queue_len;
	pch_gbe_enable_dma_rx(&adapter->hw);
	pch_gbe_enable_mac_rx(&adapter->hw);

	mod_timer(&adapter->watchdog_timer, jiffies);

	napi_enable(&adapter->napi);
	pch_gbe_irq_enable(adapter);
	netif_start_queue(adapter->netdev);

	return 0;

freeirq:
	pch_gbe_free_irq(adapter);
out:
	return err;
}

/**
 * pch_gbe_down - Down GbE network device
 * @adapter: Board private structure
 */
void pch_gbe_down(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	napi_disable(&adapter->napi);
	atomic_set(&adapter->irq_sem, 0);

	pch_gbe_irq_disable(adapter);
	pch_gbe_free_irq(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
		pch_gbe_reset(adapter);
	pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
	pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);

	dma_free_coherent(&adapter->pdev->dev, rx_ring->rx_buff_pool_size,
			  rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
	rx_ring->rx_buff_pool_logic = 0;
	rx_ring->rx_buff_pool_size = 0;
	rx_ring->rx_buff_pool = NULL;
}

/**
 * pch_gbe_sw_init - Initialize general software structures
 * @adapter: Board private structure to initialize
 * Returns: 0 on success, negative on failure
 */
static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
	hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
	hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US;

	if (pch_gbe_alloc_queues(adapter)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}
	spin_lock_init(&adapter->hw.miim_lock);
	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->ethtool_lock);
	atomic_set(&adapter->irq_sem, 0);
	pch_gbe_irq_disable(adapter);

	pch_gbe_init_stats(adapter);

	netdev_dbg(netdev,
		   "rx_buffer_len : %d mac.min_frame_size : %d mac.max_frame_size : %d\n",
		   (u32) adapter->rx_buffer_len,
		   hw->mac.min_frame_size, hw->mac.max_frame_size);
	return 0;
}

/**
 * pch_gbe_open - Called when a network interface is made active
 * @netdev: Network interface device structure
 * Returns: 0 on success, negative on failure
 */
static int pch_gbe_open(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	int err;

	/* allocate transmit descriptors */
	err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;
	/* allocate receive descriptors */
	err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;
	pch_gbe_phy_power_up(hw);
	err = pch_gbe_up(adapter);
	if (err)
		goto err_up;
	netdev_dbg(netdev, "Success End\n");
	return 0;

err_up:
	if (!adapter->wake_up_evt)
		pch_gbe_phy_power_down(hw);
	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
err_setup_rx:
	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
err_setup_tx:
	pch_gbe_reset(adapter);
	netdev_err(netdev, "Error End\n");
	return err;
}

/**
 * pch_gbe_stop - Disables a network interface
 * @netdev: Network interface device structure
 * Returns: 0 (this is not allowed to fail)
 */
static int pch_gbe_stop(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	pch_gbe_down(adapter);
	if (!adapter->wake_up_evt)
		pch_gbe_phy_power_down(hw);
	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
	return 0;
}

/**
 * pch_gbe_xmit_frame - Packet transmitting start
 * @skb:    Socket buffer structure
 * @netdev: Network interface device structure
 * Returns: NETDEV_TX_OK or NETDEV_TX_BUSY
 */
static netdev_tx_t pch_gbe_xmit_frame(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;

	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
		netif_stop_queue(netdev);
		netdev_dbg(netdev,
			   "Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n",
			   tx_ring->next_to_use, tx_ring->next_to_clean);
		return NETDEV_TX_BUSY;
	}

	/* CRC,ITAG no support */
	pch_gbe_tx_queue(adapter, tx_ring, skb);
	return NETDEV_TX_OK;
}

/**
 * pch_gbe_set_multi - Multicast and Promiscuous mode set
 * @netdev: Network interface device structure
 */
static void pch_gbe_set_multi(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u32 rctl, adrmask;
	int mc_count, i;

	netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags);

	/* By default enable address & multicast filtering */
	rctl = ioread32(&hw->reg->RX_MODE);
	rctl |= PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN;

	/* Promiscuous mode disables all hardware address filtering */
	if (netdev->flags & IFF_PROMISC)
		rctl &= ~(PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);

	/* If we want to monitor more multicast addresses than the hardware
	 * can support then disable hardware multicast filtering and accept
	 * all multicast addresses
	 */
	mc_count = netdev_mc_count(netdev);
	if ((netdev->flags & IFF_ALLMULTI) || mc_count >= PCH_GBE_MAR_ENTRIES)
		rctl &= ~PCH_GBE_MLT_FIL_EN;

	iowrite32(rctl, &hw->reg->RX_MODE);

	/* If we're not using multicast filtering then there's no point
	 * configuring the unused MAC address registers.
	 */
	if (!(rctl & PCH_GBE_MLT_FIL_EN))
		return;

	/* Load the first set of multicast addresses into the MAC address
	 * registers for use by hardware filtering
	 */
	i = 1;
	netdev_for_each_mc_addr(ha, netdev)
		pch_gbe_mac_mar_set(hw, ha->addr, i++);

	/* If there are spare MAC registers, mask & clear them */
	for (; i < PCH_GBE_MAR_ENTRIES; i++) {
		/* Temporarily mask the register */
		adrmask = ioread32(&hw->reg->ADDR_MASK);
		iowrite32(adrmask | BIT(i), &hw->reg->ADDR_MASK);
		/* wait busy */
		pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
		/* Clear the MAC register */
		iowrite32(0, &hw->reg->mac_adr[i].high);
		iowrite32(0, &hw->reg->mac_adr[i].low);
	}

	netdev_dbg(netdev,
		   "RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n",
		   ioread32(&hw->reg->RX_MODE), mc_count);
}
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
2158{
2159 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2160 struct sockaddr *skaddr = addr;
2161 int ret_val;
2162
2163 if (!is_valid_ether_addr(skaddr->sa_data)) {
2164 ret_val = -EADDRNOTAVAIL;
2165 } else {
2166 memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
2167 memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
2168 pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2169 ret_val = 0;
2170 }
2171 netdev_dbg(netdev, "ret_val : 0x%08x\n", ret_val);
2172 netdev_dbg(netdev, "dev_addr : %pM\n", netdev->dev_addr);
2173 netdev_dbg(netdev, "mac_addr : %pM\n", adapter->hw.mac.addr);
2174 netdev_dbg(netdev, "MAC_ADR1AB reg : 0x%08x 0x%08x\n",
2175 ioread32(&adapter->hw.reg->mac_adr[0].high),
2176 ioread32(&adapter->hw.reg->mac_adr[0].low));
2177 return ret_val;
2178}
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2189{
2190 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2191 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2192 unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
2193 int err;
2194
	if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
	else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
	else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
	else
		adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;

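	/* A running interface must be restarted for the new buffer size
	 * to take effect; on failure, restore the old size and bring the
	 * interface back up before returning the error.
	 */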
	if (netif_running(netdev)) {
		pch_gbe_down(adapter);
		err = pch_gbe_up(adapter);
		if (err) {
			adapter->rx_buffer_len = old_rx_buffer_len;
			pch_gbe_up(adapter);
			return err;
		}
		netdev->mtu = new_mtu;
		adapter->hw.mac.max_frame_size = max_frame;
	} else {
		pch_gbe_reset(adapter);
		netdev->mtu = new_mtu;
		adapter->hw.mac.max_frame_size = max_frame;
	}

	netdev_dbg(netdev,
		   "max_frame : %d  rx_buffer_len : %d  mtu : %d  max_frame_size : %d\n",
		   max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
		   adapter->hw.mac.max_frame_size);
	return 0;
}

/**
 * pch_gbe_set_features - Update the hardware after offload features change
 * @netdev:   Network interface device structure
 * @features: New feature set
 * Returns
 *	0:	Successfully
 */
static int pch_gbe_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

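	/* Only a change to the Rx checksum offload requires the hardware
	 * to be reprogrammed.
	 */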
	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	if (netif_running(netdev))
		pch_gbe_reinit_locked(adapter);
	else
		pch_gbe_reset(adapter);

	return 0;
}

/**
 * pch_gbe_ioctl - Controls register through a MII interface
 * @netdev:   Network interface device structure
 * @ifr:      Pointer to ifr structure
 * @cmd:      Control command
 * Returns
 *	0:	Successfully
 *	Negative value:	Failed
 */
static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "cmd : 0x%04x\n", cmd);

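	/* Hardware time stamping requests go to the PTP block; everything
	 * else is handled by the generic MII ioctl helper.
	 */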
	if (cmd == SIOCSHWTSTAMP)
		return hwtstamp_ioctl(netdev, ifr, cmd);

	return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
}

/**
 * pch_gbe_tx_timeout - Respond to a Tx Hang
 * @netdev:  Network interface device structure
 * @txqueue: Index of the hanging queue
 */
static void pch_gbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->stats.tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

/**
 * pch_gbe_napi_poll - NAPI receive and transmit polling callback
 * @napi:   Pointer to the polling device structure
 * @budget: Maximum number of Rx packets to process in one poll
 * Returns
 *	The number of packets processed; polling ends once this is
 *	less than @budget.
 */
static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
{
	struct pch_gbe_adapter *adapter =
		container_of(napi, struct pch_gbe_adapter, napi);
	int work_done = 0;
	bool poll_end_flag = false;
	bool cleaned = false;

	netdev_dbg(adapter->netdev, "budget : %d\n", budget);

	pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
	cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);

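	/* Any completed Tx work claims the whole budget so that NAPI
	 * keeps polling while transmit cleanup is still making progress.
	 */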
	if (cleaned)
		work_done = budget;

	/* If no Tx and not enough Rx work done,
	 * exit the polling mode
	 */
	if (work_done < budget)
		poll_end_flag = true;

	if (poll_end_flag) {
		napi_complete_done(napi, work_done);
		pch_gbe_irq_enable(adapter);
	}

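	/* The interrupt handler stops Rx DMA on a FIFO overrun; restart
	 * it now that the Rx ring has been serviced.
	 */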
	if (adapter->rx_stop_flag) {
		adapter->rx_stop_flag = false;
		pch_gbe_enable_dma_rx(&adapter->hw);
	}

	netdev_dbg(adapter->netdev,
		   "poll_end_flag : %d  work_done : %d  budget : %d\n",
		   poll_end_flag, work_done, budget);

	return work_done;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * pch_gbe_netpoll - Used by things like netconsole to send skbs
 * @netdev:  Network interface device structure
 */
static void pch_gbe_netpoll(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->irq);
	pch_gbe_intr(adapter->irq, netdev);
	enable_irq(adapter->irq);
}
#endif

static const struct net_device_ops pch_gbe_netdev_ops = {
	.ndo_open = pch_gbe_open,
	.ndo_stop = pch_gbe_stop,
	.ndo_start_xmit = pch_gbe_xmit_frame,
	.ndo_set_mac_address = pch_gbe_set_mac,
	.ndo_tx_timeout = pch_gbe_tx_timeout,
	.ndo_change_mtu = pch_gbe_change_mtu,
	.ndo_set_features = pch_gbe_set_features,
	.ndo_do_ioctl = pch_gbe_ioctl,
	.ndo_set_rx_mode = pch_gbe_set_multi,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = pch_gbe_netpoll,
#endif
};

static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);
	if (netif_running(netdev))
		pch_gbe_down(adapter);
	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);
	pch_gbe_phy_power_up(hw);
	pch_gbe_reset(adapter);
	/* Clear wake up status */
	pch_gbe_mac_set_wol_event(hw, 0);

	return PCI_ERS_RESULT_RECOVERED;
}

static void pch_gbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (pch_gbe_up(adapter)) {
			netdev_dbg(netdev,
				   "can't bring device back up after reset\n");
			return;
		}
	}
	netif_device_attach(netdev);
}

static int __pch_gbe_suspend(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 wufc = adapter->wake_up_evt;

	netif_device_detach(netdev);
	if (netif_running(netdev))
		pch_gbe_down(adapter);
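
	/* If wake-up events are configured, leave the receiver set up so
	 * the MAC can recognize wake packets; otherwise power down the
	 * PHY to save power.
	 */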
	if (wufc) {
		pch_gbe_set_multi(netdev);
		pch_gbe_setup_rctl(adapter);
		pch_gbe_configure_rx(adapter);
		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
				       hw->mac.link_duplex);
		pch_gbe_set_mode(adapter, hw->mac.link_speed,
				 hw->mac.link_duplex);
		pch_gbe_mac_set_wol_event(hw, wufc);
		pci_disable_device(pdev);
	} else {
		pch_gbe_phy_power_down(hw);
		pch_gbe_mac_set_wol_event(hw, wufc);
		pci_disable_device(pdev);
	}
	return 0;
}

#ifdef CONFIG_PM
static int pch_gbe_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	return __pch_gbe_suspend(pdev);
}

static int pch_gbe_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		netdev_err(netdev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);
	pch_gbe_phy_power_up(hw);
	pch_gbe_reset(adapter);
	/* Clear wake up status */
	pch_gbe_mac_set_wol_event(hw, 0);

	if (netif_running(netdev))
		pch_gbe_up(adapter);
	netif_device_attach(netdev);

	return 0;
}
#endif

static void pch_gbe_shutdown(struct pci_dev *pdev)
{
	__pch_gbe_suspend(pdev);
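
	/* When the system is powering off, arm PME from D3 and put the
	 * device into D3hot so configured WoL events can wake the system.
	 */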
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

static void pch_gbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	cancel_work_sync(&adapter->reset_task);
	unregister_netdev(netdev);

	pch_gbe_phy_hw_reset(&adapter->hw);

	free_netdev(netdev);
}

static int pch_gbe_probe(struct pci_dev *pdev,
			 const struct pci_device_id *pci_id)
{
	struct net_device *netdev;
	struct pch_gbe_adapter *adapter;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

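	/* Prefer 64-bit DMA; fall back to a 32-bit mask if the platform
	 * cannot support it.
	 */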
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev, "ERR: No usable DMA configuration, aborting\n");
			return ret;
		}
	}

	ret = pcim_iomap_regions(pdev, 1 << PCH_GBE_PCI_BAR, pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev,
			"ERR: Can't reserve PCI I/O and memory resources\n");
		return ret;
	}
	pci_set_master(pdev);

	netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
	if (!netdev)
		return -ENOMEM;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR];
	adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data;
	if (adapter->pdata && adapter->pdata->platform_init)
		adapter->pdata->platform_init(pdev);

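	/* The IEEE 1588 time stamp hardware sits in a separate PCI
	 * function (device 12, function 4) on the same bus.
	 */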
	adapter->ptp_pdev =
		pci_get_domain_bus_and_slot(pci_domain_nr(adapter->pdev->bus),
					    adapter->pdev->bus->number,
					    PCI_DEVFN(12, 4));

	netdev->netdev_ops = &pch_gbe_netdev_ops;
	netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
	netif_napi_add(netdev, &adapter->napi,
		       pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
	netdev->hw_features = NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	netdev->features = netdev->hw_features;
	pch_gbe_set_ethtool_ops(netdev);

	/* MTU range: 46 - 10300 */
	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
	netdev->max_mtu = PCH_GBE_MAX_JUMBO_FRAME_SIZE -
			  (ETH_HLEN + ETH_FCS_LEN);

	pch_gbe_mac_load_mac_addr(&adapter->hw);
	pch_gbe_mac_reset_hw(&adapter->hw);

	/* setup the private structure */
	ret = pch_gbe_sw_init(adapter);
	if (ret)
		goto err_free_netdev;

	/* Initialize PHY */
	ret = pch_gbe_init_phy(adapter);
	if (ret) {
		dev_err(&pdev->dev, "PHY initialize error\n");
		goto err_free_adapter;
	}

	/* Read the MAC address and store it in the private data */
	ret = pch_gbe_mac_read_mac_addr(&adapter->hw);
	if (ret) {
		dev_err(&pdev->dev, "MAC address Read Error\n");
		goto err_free_adapter;
	}

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		/* If the MAC is invalid (or just missing), display a warning
		 * but do not abort setting up the device. pch_gbe_up will
		 * prevent the interface from being brought up until a valid
		 * MAC is set.
		 */
		dev_err(&pdev->dev,
			"Invalid MAC address, interface disabled.\n");
	}
	timer_setup(&adapter->watchdog_timer, pch_gbe_watchdog, 0);

	INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);

	pch_gbe_check_options(adapter);

	/* Initialize the wake-on-LAN settings */
	adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
	dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);

	/* Reset the hardware with the new settings */
	pch_gbe_reset(adapter);

	ret = register_netdev(netdev);
	if (ret)
		goto err_free_adapter;

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	dev_dbg(&pdev->dev, "PCH Network Connection\n");

	/* Disable hibernation on certain platforms */
	if (adapter->pdata && adapter->pdata->phy_disable_hibernate)
		pch_gbe_phy_disable_hibernate(&adapter->hw);

	device_set_wakeup_enable(&pdev->dev, 1);
	return 0;

err_free_adapter:
	pch_gbe_phy_hw_reset(&adapter->hw);
err_free_netdev:
	free_netdev(netdev);
	return ret;
}


/* The AR803X PHY on the MinnowBoard requires a physical pin to be toggled to
 * ensure it is awake for probe and init. Request the line and reset the PHY.
 */
static int pch_gbe_minnow_platform_init(struct pci_dev *pdev)
{
	unsigned long flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH | GPIOF_EXPORT;
	unsigned int gpio = MINNOW_PHY_RESET_GPIO;
	int ret;

	ret = devm_gpio_request_one(&pdev->dev, gpio, flags,
				    "minnow_phy_reset");
	if (ret) {
		dev_err(&pdev->dev,
			"ERR: Can't request PHY reset GPIO line '%d'\n", gpio);
		return ret;
	}

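	/* Pulse the reset line: hold it low for at least 1.25 ms, then
	 * release it and give the PHY the same time to come back up.
	 */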
	gpio_set_value(gpio, 0);
	usleep_range(1250, 1500);
	gpio_set_value(gpio, 1);
	usleep_range(1250, 1500);

	return ret;
}

static struct pch_gbe_privdata pch_gbe_minnow_privdata = {
	.phy_tx_clk_delay = true,
	.phy_disable_hibernate = true,
	.platform_init = pch_gbe_minnow_platform_init,
};

static const struct pci_device_id pch_gbe_pcidev_id[] = {
	{.vendor = PCI_VENDOR_ID_INTEL,
	 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
	 .subvendor = PCI_VENDOR_ID_CIRCUITCO,
	 .subdevice = PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00),
	 .driver_data = (kernel_ulong_t)&pch_gbe_minnow_privdata
	 },
	{.vendor = PCI_VENDOR_ID_INTEL,
	 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	{.vendor = PCI_VENDOR_ID_ROHM,
	 .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	{.vendor = PCI_VENDOR_ID_ROHM,
	 .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	/* required last entry */
	{0}
};

#ifdef CONFIG_PM
static const struct dev_pm_ops pch_gbe_pm_ops = {
	.suspend = pch_gbe_suspend,
	.resume = pch_gbe_resume,
	.freeze = pch_gbe_suspend,
	.thaw = pch_gbe_resume,
	.poweroff = pch_gbe_suspend,
	.restore = pch_gbe_resume,
};
#endif

static const struct pci_error_handlers pch_gbe_err_handler = {
	.error_detected = pch_gbe_io_error_detected,
	.slot_reset = pch_gbe_io_slot_reset,
	.resume = pch_gbe_io_resume
};

static struct pci_driver pch_gbe_driver = {
	.name = KBUILD_MODNAME,
	.id_table = pch_gbe_pcidev_id,
	.probe = pch_gbe_probe,
	.remove = pch_gbe_remove,
#ifdef CONFIG_PM
	.driver.pm = &pch_gbe_pm_ops,
#endif
	.shutdown = pch_gbe_shutdown,
	.err_handler = &pch_gbe_err_handler
};
module_pci_driver(pch_gbe_driver);

MODULE_DESCRIPTION("EG20T PCH Gigabit Ethernet Driver");
MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);