/*
 * Copyright (C) 1999 - 2010 Intel Corporation.
 * Copyright (C) 2010 - 2012 LAPIS SEMICONDUCTOR CO., LTD.
 *
 * This code was derived from the Intel e1000e Linux driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include "pch_gbe.h"
#include "pch_gbe_api.h"
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
#include <linux/gpio.h>

#define DRV_VERSION	"1.01"
const char pch_driver_version[] = DRV_VERSION;

#define PCI_DEVICE_ID_INTEL_IOH1_GBE	0x8802
#define PCH_GBE_MAR_ENTRIES		16
#define PCH_GBE_SHORT_PKT		64
#define DSC_INIT16			0xC000
#define PCH_GBE_DMA_ALIGN		0
#define PCH_GBE_DMA_PADDING		2
#define PCH_GBE_WATCHDOG_PERIOD		(5 * HZ)
#define PCH_GBE_COPYBREAK_DEFAULT	256
#define PCH_GBE_PCI_BAR			1
#define PCH_GBE_RESERVE_MEMORY		0x200000	/* 2MB */

/* Macros for ML7223 */
#define PCI_VENDOR_ID_ROHM		0x10db
#define PCI_DEVICE_ID_ROHM_ML7223_GBE	0x8013

/* Macros for ML7831 */
#define PCI_DEVICE_ID_ROHM_ML7831_GBE	0x8802
#define PCH_GBE_TX_WEIGHT	64
#define PCH_GBE_RX_WEIGHT	64
#define PCH_GBE_RX_BUFFER_WRITE	16

/* Initialize the wake-on-LAN settings */
#define PCH_GBE_WL_INIT_SETTING	(PCH_GBE_WLC_MP)

#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
	PCH_GBE_CHIP_TYPE_INTERNAL | \
	PCH_GBE_RGMII_MODE_RGMII     \
	)

/* Frame and receive buffer size limits */
#define PCH_GBE_MAX_RX_BUFFER_SIZE	0x2880
#define PCH_GBE_MAX_JUMBO_FRAME_SIZE	10318
#define PCH_GBE_FRAME_SIZE_2048		2048
#define PCH_GBE_FRAME_SIZE_4096		4096
#define PCH_GBE_FRAME_SIZE_8192		8192
#define PCH_GBE_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
#define PCH_GBE_RX_DESC(R, i)		PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
#define PCH_GBE_TX_DESC(R, i)		PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
/* Number of unused descriptors in ring R; one slot is always kept empty so
 * that next_to_use == next_to_clean means "empty" rather than "full".
 */
#define PCH_GBE_DESC_UNUSED(R) \
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	(R)->next_to_clean - (R)->next_to_use - 1)

/* Pause packet value */
#define PCH_GBE_PAUSE_PKT1_VALUE	0x00C28001
#define PCH_GBE_PAUSE_PKT2_VALUE	0x00000100
#define PCH_GBE_PAUSE_PKT4_VALUE	0x01000888
#define PCH_GBE_PAUSE_PKT5_VALUE	0x0000FFFF

/* Interrupt events enabled while the interface is up */
#define PCH_GBE_INT_ENABLE_MASK ( \
	PCH_GBE_INT_RX_DMA_CMPLT | \
	PCH_GBE_INT_RX_DSC_EMP   | \
	PCH_GBE_INT_RX_FIFO_ERR  | \
	PCH_GBE_INT_WOL_DET      | \
	PCH_GBE_INT_TX_CMPLT     \
	)

#define PCH_GBE_INT_DISABLE_ALL		0

/* Macros for ieee1588 */
/* 0x40 Time Synchronization Channel Control Register Bits */
#define MASTER_MODE		(1<<0)
#define SLAVE_MODE		(0)
#define V2_MODE			(1<<31)
#define CAP_MODE0		(0)
#define CAP_MODE2		(1<<17)

/* 0x44 Time Synchronization Channel Event Register Bits */
#define TX_SNAPSHOT_LOCKED	(1<<0)
#define RX_SNAPSHOT_LOCKED	(1<<1)

#define PTP_L4_MULTICAST_SA	"01:00:5e:00:01:81"
#define PTP_L2_MULTICAST_SA	"01:1b:19:00:00:00"

#define MINNOW_PHY_RESET_GPIO	13

static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;

static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
			       int data);
static void pch_gbe_set_multi(struct net_device *netdev);

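/**
 * pch_ptp_match - check whether an skb matches the queued PTP snapshot
 * @skb:    sk_buff to examine
 * @uid_hi: top 16 bits of the source UUID latched by the hardware
 * @uid_lo: bottom 32 bits of the source UUID latched by the hardware
 * @seqid:  PTP sequence id latched by the hardware
 *
 * Returns nonzero only for a PTP event packet whose source UUID and
 * sequence id match the snapshot currently held by the IEEE 1588 unit.
 */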
static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
	u8 *data = skb->data;
	unsigned int offset;
	u16 *hi, *id;
	u32 lo;

	if (ptp_classify_raw(skb) == PTP_CLASS_NONE)
		return 0;

	offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;

	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
		return 0;

	hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
	id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

	memcpy(&lo, &hi[1], sizeof(lo));

	return (uid_hi == *hi &&
		uid_lo == lo &&
		seqid == *id);
}

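/**
 * pch_rx_timestamp - attach the latched RX timestamp to a matching skb
 * @adapter: board private structure
 * @skb:     received packet
 */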
static void
pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct pci_dev *pdev;
	u64 ns;
	u32 hi, lo, val;
	u16 uid, seq;

	if (!adapter->hwts_rx_en)
		return;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	val = pch_ch_event_read(pdev);

	if (!(val & RX_SNAPSHOT_LOCKED))
		return;

	lo = pch_src_uuid_lo_read(pdev);
	hi = pch_src_uuid_hi_read(pdev);

	uid = hi & 0xffff;
	seq = (hi >> 16) & 0xffff;

	if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
		goto out;

	ns = pch_rx_snap_read(pdev);

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
	pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
}

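/**
 * pch_tx_timestamp - poll for the TX snapshot and deliver it to the stack
 * @adapter: board private structure
 * @skb:     packet that was just queued for transmission
 */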
static void
pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct pci_dev *pdev;
	struct skb_shared_info *shtx;
	u64 ns;
	u32 cnt, val;

	shtx = skb_shinfo(skb);
	if (likely(!(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)))
		return;

	shtx->tx_flags |= SKBTX_IN_PROGRESS;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	/* We have to poll for the Tx time stamp; it is usually
	 * ready after 4 to 6 microseconds.
	 */
	for (cnt = 0; cnt < 100; cnt++) {
		val = pch_ch_event_read(pdev);
		if (val & TX_SNAPSHOT_LOCKED)
			break;
		udelay(1);
	}
	if (!(val & TX_SNAPSHOT_LOCKED)) {
		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
		return;
	}

	ns = pch_tx_snap_read(pdev);

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);

	pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED);
}

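/**
 * hwtstamp_ioctl - handle the SIOCSHWTSTAMP request
 * @netdev: network interface device structure
 * @ifr:    user request carrying a struct hwtstamp_config
 * @cmd:    ioctl command (SIOCSHWTSTAMP)
 */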
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config cfg;
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev;
	u8 station[20];

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		adapter->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		adapter->hwts_rx_en = 0;
		pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
		strcpy(station, PTP_L4_MULTICAST_SA);
		pch_set_station_address(station, pdev);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
		strcpy(station, PTP_L2_MULTICAST_SA);
		pch_set_station_address(station, pdev);
		break;
	default:
		return -ERANGE;
	}

	adapter->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;

	/* Clear out any old time stamps. */
	pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

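/**
 * pch_gbe_mac_load_mac_addr - Trigger the hardware to reload its MAC address
 * @hw: Pointer to the HW structure
 */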
static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
{
	iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
}

/**
 * pch_gbe_mac_read_mac_addr - Read MAC address
 * @hw: Pointer to the HW structure
 * Returns:
 *	0: Successful.
 */
s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
{
	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
	u32 adr1a, adr1b;

	adr1a = ioread32(&hw->reg->mac_adr[0].high);
	adr1b = ioread32(&hw->reg->mac_adr[0].low);

	hw->mac.addr[0] = (u8)(adr1a & 0xFF);
	hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
	hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
	hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
	hw->mac.addr[4] = (u8)(adr1b & 0xFF);
	hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);

	netdev_dbg(adapter->netdev, "hw->mac.addr : %pM\n", hw->mac.addr);
	return 0;
}

/**
 * pch_gbe_wait_clr_bit - Wait for a bit to clear
 * @reg: Pointer of register
 * @bit: Busy bit
 */
static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
{
	u32 tmp;

	/* wait busy */
	tmp = 1000;
	while ((ioread32(reg) & bit) && --tmp)
		cpu_relax();
	if (!tmp)
		pr_err("Error: busy bit is not cleared\n");
}

/**
 * pch_gbe_mac_mar_set - Set MAC address register
 * @hw:    Pointer to the HW structure
 * @addr:  Pointer to the MAC address
 * @index: MAC address array register
 */
static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 *addr, u32 index)
{
	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
	u32 mar_low, mar_high, adrmask;

	netdev_dbg(adapter->netdev, "index : 0x%x\n", index);

	/*
	 * HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
		    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
	/* Stop the MAC Address of index. */
	adrmask = ioread32(&hw->reg->ADDR_MASK);
	iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
	/* Set the MAC address to the MAC address register */
	iowrite32(mar_high, &hw->reg->mac_adr[index].high);
	iowrite32(mar_low, &hw->reg->mac_adr[index].low);
	/* Start the MAC address of index */
	iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}

/**
 * pch_gbe_mac_reset_hw - Reset hardware
 * @hw: Pointer to the HW structure
 */
static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
{
	/* Read the MAC address and store it in the private data */
	pch_gbe_mac_read_mac_addr(hw);
	iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
#ifdef PCH_GBE_MAC_IFOP_RGMII
	iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
#endif
	pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
	/* Setup the receive address */
	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
	return;
}

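/**
 * pch_gbe_disable_mac_rx - Disable the MAC receive unit
 * @hw: Pointer to the HW structure
 */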
static void pch_gbe_disable_mac_rx(struct pch_gbe_hw *hw)
{
	u32 rctl;

	rctl = ioread32(&hw->reg->MAC_RX_EN);
	iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
}

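/**
 * pch_gbe_enable_mac_rx - Enable the MAC receive unit
 * @hw: Pointer to the HW structure
 */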
static void pch_gbe_enable_mac_rx(struct pch_gbe_hw *hw)
{
	u32 rctl;

	rctl = ioread32(&hw->reg->MAC_RX_EN);
	iowrite32((rctl | PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
}

/**
 * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
 * @hw:        Pointer to the HW structure
 * @mar_count: Receive address registers
 */
static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
{
	u32 i;

	/* Setup the receive address */
	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);

	/* Zero out the other receive addresses */
	for (i = 1; i < mar_count; i++) {
		iowrite32(0, &hw->reg->mac_adr[i].high);
		iowrite32(0, &hw->reg->mac_adr[i].low);
	}
	iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}

/**
 * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
 * @hw:             Pointer to the HW structure
 * @mc_addr_list:   Array of multicast addresses to program
 * @mc_addr_count:  Number of multicast addresses to program
 * @mar_used_count: The first MAC Address register free to program
 * @mar_total_num:  Total number of supported MAC Address Registers
 */
static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
					    u8 *mc_addr_list, u32 mc_addr_count,
					    u32 mar_used_count, u32 mar_total_num)
{
	u32 i, adrmask;

	/* Load the first set of multicast addresses into the exact
	 * filters; once the supply of addresses runs out, clear the
	 * remaining address registers.
	 */
	for (i = mar_used_count; i < mar_total_num; i++) {
		if (mc_addr_count) {
			pch_gbe_mac_mar_set(hw, mc_addr_list, i);
			mc_addr_count--;
			mc_addr_list += ETH_ALEN;
		} else {
			/* Clear MAC address mask */
			adrmask = ioread32(&hw->reg->ADDR_MASK);
			iowrite32((adrmask | (0x0001 << i)),
				  &hw->reg->ADDR_MASK);
			/* wait busy */
			pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
			/* Clear MAC address */
			iowrite32(0, &hw->reg->mac_adr[i].high);
			iowrite32(0, &hw->reg->mac_adr[i].low);
		}
	}
}

/**
 * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
 * @hw: Pointer to the HW structure
 * Returns:
 *	0:      Successful.
 *	Negative value: Failed.
 */
s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
{
	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
	struct pch_gbe_mac_info *mac = &hw->mac;
	u32 rx_fctrl;

	netdev_dbg(adapter->netdev, "mac->fc = %u\n", mac->fc);

	rx_fctrl = ioread32(&hw->reg->RX_FCTRL);

	switch (mac->fc) {
	case PCH_GBE_FC_NONE:
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = false;
		break;
	case PCH_GBE_FC_RX_PAUSE:
		rx_fctrl |= PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = false;
		break;
	case PCH_GBE_FC_TX_PAUSE:
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = true;
		break;
	case PCH_GBE_FC_FULL:
		rx_fctrl |= PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = true;
		break;
	default:
		netdev_err(adapter->netdev,
			   "Flow control param set incorrectly\n");
		return -EINVAL;
	}
	if (mac->link_duplex == DUPLEX_HALF)
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
	iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
	netdev_dbg(adapter->netdev,
		   "RX_FCTRL reg : 0x%08x  mac->tx_fc_enable : %d\n",
		   ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
	return 0;
}

/**
 * pch_gbe_mac_set_wol_event - Set wake-on-lan event
 * @hw:     Pointer to the HW structure
 * @wu_evt: Wake up event
 */
static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
{
	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
	u32 addr_mask;

	netdev_dbg(adapter->netdev, "wu_evt : 0x%08x  ADDR_MASK reg : 0x%08x\n",
		   wu_evt, ioread32(&hw->reg->ADDR_MASK));

	if (wu_evt) {
		/* Set Wake-On-Lan address mask */
		addr_mask = ioread32(&hw->reg->ADDR_MASK);
		iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
		/* wait busy */
		pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
		iowrite32(0, &hw->reg->WOL_ST);
		iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
		iowrite32(0x02, &hw->reg->TCPIP_ACC);
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	} else {
		iowrite32(0, &hw->reg->WOL_CTRL);
		iowrite32(0, &hw->reg->WOL_ST);
	}
	return;
}

/**
 * pch_gbe_mac_ctrl_miim - Control MIIM interface
 * @hw:   Pointer to the HW structure
 * @addr: Address of PHY
 * @dir:  Operating direction (read or write)
 * @reg:  Access register of PHY
 * @data: Write data
 *
 * Returns: the data read from the PHY (on read) or the data written (on write).
 */
u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
			  u16 data)
{
	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&hw->miim_lock, flags);

	for (i = 100; i; --i) {
		if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
			break;
		udelay(20);
	}
	if (i == 0) {
		netdev_err(adapter->netdev, "pch-gbe.miim won't go Ready\n");
		spin_unlock_irqrestore(&hw->miim_lock, flags);
		return 0;	/* No way to indicate timeout error */
	}
	iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
		   (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
		   dir | data), &hw->reg->MIIM);
	for (i = 0; i < 100; i++) {
		udelay(20);
		data_out = ioread32(&hw->reg->MIIM);
		if ((data_out & PCH_GBE_MIIM_OPER_READY))
			break;
	}
	spin_unlock_irqrestore(&hw->miim_lock, flags);

	netdev_dbg(adapter->netdev, "PHY %s: reg=%d, data=0x%04X\n",
		   dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
		   dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
	return (u16) data_out;
}

/**
 * pch_gbe_mac_set_pause_packet - Set pause packet
 * @hw: Pointer to the HW structure
 */
static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
{
	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
	unsigned long tmp2, tmp3;

	/* Set Pause packet */
	tmp2 = hw->mac.addr[1];
	tmp2 = (tmp2 << 8) | hw->mac.addr[0];
	tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);

	tmp3 = hw->mac.addr[5];
	tmp3 = (tmp3 << 8) | hw->mac.addr[4];
	tmp3 = (tmp3 << 8) | hw->mac.addr[3];
	tmp3 = (tmp3 << 8) | hw->mac.addr[2];

	iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
	iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
	iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
	iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
	iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);

	/* Transmit Pause Packet */
	iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);

	netdev_dbg(adapter->netdev,
		   "PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
		   ioread32(&hw->reg->PAUSE_PKT1),
		   ioread32(&hw->reg->PAUSE_PKT2),
		   ioread32(&hw->reg->PAUSE_PKT3),
		   ioread32(&hw->reg->PAUSE_PKT4),
		   ioread32(&hw->reg->PAUSE_PKT5));

	return;
}

/**
 * pch_gbe_alloc_queues - Allocate memory for all rings
 * @adapter: Board private structure to initialize
 * Returns:
 *	0:      Successfully
 *	Negative value: Failed
 */
static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
{
	adapter->tx_ring = devm_kzalloc(&adapter->pdev->dev,
					sizeof(*adapter->tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = devm_kzalloc(&adapter->pdev->dev,
					sizeof(*adapter->rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		return -ENOMEM;
	return 0;
}

/**
 * pch_gbe_init_stats - Initialize status
 * @adapter: Board private structure to initialize
 */
static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
{
	memset(&adapter->stats, 0, sizeof(adapter->stats));
	return;
}

/**
 * pch_gbe_init_phy - Initialize PHY
 * @adapter: Board private structure to initialize
 * Returns:
 *	0:      Successfully
 *	Negative value: Failed
 */
static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 addr;
	u16 bmcr, stat;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
		adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
		stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
		stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	adapter->hw.phy.addr = adapter->mii.phy_id;
	netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id);
	if (addr == PCH_GBE_PHY_REGS_LEN)
		return -EAGAIN;
	/* Selected the phy and isolate the rest */
	for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
		if (addr != adapter->mii.phy_id) {
			pch_gbe_mdio_write(netdev, addr, MII_BMCR,
					   BMCR_ISOLATE);
		} else {
			bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
			pch_gbe_mdio_write(netdev, addr, MII_BMCR,
					   bmcr & ~BMCR_ISOLATE);
		}
	}

	/* MII setup */
	adapter->mii.phy_id_mask = 0x1F;
	adapter->mii.reg_num_mask = 0x1F;
	adapter->mii.dev = adapter->netdev;
	adapter->mii.mdio_read = pch_gbe_mdio_read;
	adapter->mii.mdio_write = pch_gbe_mdio_write;
	adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
	return 0;
}

/**
 * pch_gbe_mdio_read - The read function for mii
 * @netdev: Network interface device structure
 * @addr:   PHY address
 * @reg:    Access location
 * Returns: the value read from the PHY register.
 */
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
				     (u16) 0);
}

/**
 * pch_gbe_mdio_write - The write function for mii
 * @netdev: Network interface device structure
 * @addr:   PHY address
 * @reg:    Access location
 * @data:   Write data
 */
static void pch_gbe_mdio_write(struct net_device *netdev,
			       int addr, int reg, int data)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
}

/**
 * pch_gbe_reset_task - Reset processing at the time of transmission timeout
 * @work: Pointer of board private structure
 */
static void pch_gbe_reset_task(struct work_struct *work)
{
	struct pch_gbe_adapter *adapter;
	adapter = container_of(work, struct pch_gbe_adapter, reset_task);

	rtnl_lock();
	pch_gbe_reinit_locked(adapter);
	rtnl_unlock();
}

/**
 * pch_gbe_reinit_locked - Re-initialization
 * @adapter: Board private structure
 */
void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
{
	pch_gbe_down(adapter);
	pch_gbe_up(adapter);
}

/**
 * pch_gbe_reset - Reset GbE
 * @adapter: Board private structure
 */
void pch_gbe_reset(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	pch_gbe_mac_reset_hw(&adapter->hw);
	/* reprogram multicast address register after reset */
	pch_gbe_set_multi(netdev);
	/* Setup the receive address. */
	pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
	if (pch_gbe_hal_init_hw(&adapter->hw))
		netdev_err(netdev, "Hardware Error\n");
}

/**
 * pch_gbe_free_irq - Free an interrupt
 * @adapter: Board private structure
 */
static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->irq, netdev);
	pci_free_irq_vectors(adapter->pdev);
}

/**
 * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: Board private structure
 */
static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	atomic_inc(&adapter->irq_sem);
	iowrite32(0, &hw->reg->INT_EN);
	ioread32(&hw->reg->INT_ST);
	synchronize_irq(adapter->irq);

	netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n",
		   ioread32(&hw->reg->INT_EN));
}

/**
 * pch_gbe_irq_enable - Enable default interrupt generation settings
 * @adapter: Board private structure
 */
static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	if (likely(atomic_dec_and_test(&adapter->irq_sem)))
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	ioread32(&hw->reg->INT_ST);
	netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n",
		   ioread32(&hw->reg->INT_EN));
}


/**
 * pch_gbe_setup_tctl - configure the Transmit control registers
 * @adapter: Board private structure
 */
static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 tx_mode, tcpip;

	tx_mode = PCH_GBE_TM_LONG_PKT |
		PCH_GBE_TM_ST_AND_FD |
		PCH_GBE_TM_SHORT_PKT |
		PCH_GBE_TM_TH_TX_STRT_8 |
		PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;

	iowrite32(tx_mode, &hw->reg->TX_MODE);

	tcpip = ioread32(&hw->reg->TCPIP_ACC);
	tcpip |= PCH_GBE_TX_TCPIPACC_EN;
	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
	return;
}

/**
 * pch_gbe_configure_tx - Configure Transmit Unit after Reset
 * @adapter: Board private structure
 */
static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 tdba, tdlen, dctrl;

	netdev_dbg(adapter->netdev, "dma addr = 0x%08llx  size = 0x%08x\n",
		   (unsigned long long)adapter->tx_ring->dma,
		   adapter->tx_ring->size);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	tdba = adapter->tx_ring->dma;
	tdlen = adapter->tx_ring->size - 0x10;
	iowrite32(tdba, &hw->reg->TX_DSC_BASE);
	iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
	iowrite32(tdba, &hw->reg->TX_DSC_SW_P);

	/* Enables Transmission DMA */
	dctrl = ioread32(&hw->reg->DMA_CTRL);
	dctrl |= PCH_GBE_TX_DMA_EN;
	iowrite32(dctrl, &hw->reg->DMA_CTRL);
}

/**
 * pch_gbe_setup_rctl - Configure the receive control registers
 * @adapter: Board private structure
 */
static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rx_mode, tcpip;

	rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
	    PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;

	iowrite32(rx_mode, &hw->reg->RX_MODE);

	tcpip = ioread32(&hw->reg->TCPIP_ACC);

	tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
	tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
	return;
}

/**
 * pch_gbe_configure_rx - Configure Receive Unit after Reset
 * @adapter: Board private structure
 */
static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rdba, rdlen, rxdma;

	netdev_dbg(adapter->netdev, "dma adr = 0x%08llx  size = 0x%08x\n",
		   (unsigned long long)adapter->rx_ring->dma,
		   adapter->rx_ring->size);

	pch_gbe_mac_force_mac_fc(hw);

	pch_gbe_disable_mac_rx(hw);

	/* Disables Receive DMA */
	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma &= ~PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);

	netdev_dbg(adapter->netdev,
		   "MAC_RX_EN reg = 0x%08x  DMA_CTRL reg = 0x%08x\n",
		   ioread32(&hw->reg->MAC_RX_EN),
		   ioread32(&hw->reg->DMA_CTRL));

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	rdba = adapter->rx_ring->dma;
	rdlen = adapter->rx_ring->size - 0x10;
	iowrite32(rdba, &hw->reg->RX_DSC_BASE);
	iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
	iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
}

/**
 * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
 * @adapter:     Board private structure
 * @buffer_info: Buffer information structure
 */
static void pch_gbe_unmap_and_free_tx_resource(
	struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
{
	if (buffer_info->mapped) {
		dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->mapped = false;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}

/**
 * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
 * @adapter:     Board private structure
 * @buffer_info: Buffer information structure
 */
static void pch_gbe_unmap_and_free_rx_resource(
					struct pch_gbe_adapter *adapter,
					struct pch_gbe_buffer *buffer_info)
{
	if (buffer_info->mapped) {
		dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->mapped = false;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}

/**
 * pch_gbe_clean_tx_ring - Free Tx Buffers
 * @adapter: Board private structure
 * @tx_ring: Ring to be cleaned
 */
static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
				  struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
	}
	netdev_dbg(adapter->netdev,
		   "call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);

	size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
	iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
}

/**
 * pch_gbe_clean_rx_ring - Free Rx Buffers
 * @adapter: Board private structure
 * @rx_ring: Ring to free buffers from
 */
static void
pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
		      struct pch_gbe_rx_ring *rx_ring)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
	}
	netdev_dbg(adapter->netdev,
		   "call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
	size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
	iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
}

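/**
 * pch_gbe_set_rgmii_ctrl - RGMII interface setting
 * @adapter: Board private structure
 * @speed:   Link speed
 * @duplex:  Link duplex
 */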
static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
				   u16 duplex)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	unsigned long rgmii = 0;

#ifdef PCH_GBE_MAC_IFOP_RGMII
	switch (speed) {
	case SPEED_10:
		rgmii = (PCH_GBE_RGMII_RATE_2_5M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	case SPEED_100:
		rgmii = (PCH_GBE_RGMII_RATE_25M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	case SPEED_1000:
		rgmii = (PCH_GBE_RGMII_RATE_125M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	}
	iowrite32(rgmii, &hw->reg->RGMII_CTRL);
#else
	rgmii = 0;
	iowrite32(rgmii, &hw->reg->RGMII_CTRL);
#endif
}
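
/**
 * pch_gbe_set_mode - GMII/MII mode setting
 * @adapter: Board private structure
 * @speed:   Link speed
 * @duplex:  Link duplex
 */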
static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
			     u16 duplex)
{
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_hw *hw = &adapter->hw;
	unsigned long mode = 0;

	switch (speed) {
	case SPEED_10:
		mode = PCH_GBE_MODE_MII_ETHER;
		netdev->tx_queue_len = 10;
		break;
	case SPEED_100:
		mode = PCH_GBE_MODE_MII_ETHER;
		netdev->tx_queue_len = 100;
		break;
	case SPEED_1000:
		mode = PCH_GBE_MODE_GMII_ETHER;
		break;
	}
	if (duplex == DUPLEX_FULL)
		mode |= PCH_GBE_MODE_FULL_DUPLEX;
	else
		mode |= PCH_GBE_MODE_HALF_DUPLEX;
	iowrite32(mode, &hw->reg->MODE);
}

/**
 * pch_gbe_watchdog - Watchdog process
 * @t: timer list containing a Board private structure
 */
static void pch_gbe_watchdog(struct timer_list *t)
{
	struct pch_gbe_adapter *adapter = from_timer(adapter, t,
						     watchdog_timer);
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_hw *hw = &adapter->hw;

	netdev_dbg(netdev, "right now = %ld\n", jiffies);

	pch_gbe_update_stats(adapter);
	if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
		struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
		netdev->tx_queue_len = adapter->tx_queue_len;
		/* mii library handles link maintenance tasks */
		if (mii_ethtool_gset(&adapter->mii, &cmd)) {
			netdev_err(netdev, "ethtool get setting Error\n");
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies +
						PCH_GBE_WATCHDOG_PERIOD));
			return;
		}
		hw->mac.link_speed = ethtool_cmd_speed(&cmd);
		hw->mac.link_duplex = cmd.duplex;
		/* Set the RGMII control. */
		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
				       hw->mac.link_duplex);
		/* Set the communication mode */
		pch_gbe_set_mode(adapter, hw->mac.link_speed,
				 hw->mac.link_duplex);
		netdev_dbg(netdev,
			   "Link is Up %d Mbps %s-Duplex\n",
			   hw->mac.link_speed,
			   cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
		netif_carrier_on(netdev);
		netif_wake_queue(netdev);
	} else if ((!mii_link_ok(&adapter->mii)) &&
		   (netif_carrier_ok(netdev))) {
		netdev_dbg(netdev, "NIC Link is Down\n");
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_HALF;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}
	mod_timer(&adapter->watchdog_timer,
		  round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
}

/**
 * pch_gbe_tx_queue - Carry out queuing of the transmission data
 * @adapter: Board private structure
 * @tx_ring: Tx descriptor ring structure
 * @skb:     Sockt buffer structure
 */
static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
			     struct pch_gbe_tx_ring *tx_ring,
			     struct sk_buff *skb)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_tx_desc *tx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *tmp_skb;
	unsigned int frame_ctrl;
	unsigned int ring_num;

	/*-- Set frame control --*/
	frame_ctrl = 0;
	if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
		frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
	if (skb->ip_summed == CHECKSUM_NONE)
		frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;

	/* Performs checksum processing in software, because the hardware
	 * checksum accelerator cannot handle frames shorter than 64 bytes.
	 */
	if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
		frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
			      PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			unsigned int offset;
			offset = skb_transport_offset(skb);
			if (iph->protocol == IPPROTO_TCP) {
				skb->csum = 0;
				tcp_hdr(skb)->check = 0;
				skb->csum = skb_checksum(skb, offset,
							 skb->len - offset, 0);
				tcp_hdr(skb)->check =
					csum_tcpudp_magic(iph->saddr,
							  iph->daddr,
							  skb->len - offset,
							  IPPROTO_TCP,
							  skb->csum);
			} else if (iph->protocol == IPPROTO_UDP) {
				skb->csum = 0;
				udp_hdr(skb)->check = 0;
				skb->csum =
					skb_checksum(skb, offset,
						     skb->len - offset, 0);
				udp_hdr(skb)->check =
					csum_tcpudp_magic(iph->saddr,
							  iph->daddr,
							  skb->len - offset,
							  IPPROTO_UDP,
							  skb->csum);
			}
		}
	}

	ring_num = tx_ring->next_to_use;
	if (unlikely((ring_num + 1) == tx_ring->count))
		tx_ring->next_to_use = 0;
	else
		tx_ring->next_to_use = ring_num + 1;

	buffer_info = &tx_ring->buffer_info[ring_num];
	tmp_skb = buffer_info->skb;

	/* [Header:14][payload] ---> [Header:14][padding:2][payload] */
	memcpy(tmp_skb->data, skb->data, ETH_HLEN);
	tmp_skb->data[ETH_HLEN] = 0x00;
	tmp_skb->data[ETH_HLEN + 1] = 0x00;
	tmp_skb->len = skb->len;
	memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
	       (skb->len - ETH_HLEN));
	/*-- Set Buffer information --*/
	buffer_info->length = tmp_skb->len;
	buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
					  buffer_info->length,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
		netdev_err(adapter->netdev, "TX DMA map failed\n");
		buffer_info->dma = 0;
		buffer_info->time_stamp = 0;
		tx_ring->next_to_use = ring_num;
		return;
	}
	buffer_info->mapped = true;
	buffer_info->time_stamp = jiffies;

	/*-- Set Tx descriptor --*/
	tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
	tx_desc->buffer_addr = (buffer_info->dma);
	tx_desc->length = (tmp_skb->len);
	tx_desc->tx_words_eob = ((tmp_skb->len + 3));
	tx_desc->tx_frame_ctrl = (frame_ctrl);
	tx_desc->gbec_status = (DSC_INIT16);

	if (unlikely(++ring_num == tx_ring->count))
		ring_num = 0;

	/* Update software pointer of TX descriptor */
	iowrite32(tx_ring->dma +
		  (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
		  &hw->reg->TX_DSC_SW_P);

	pch_tx_timestamp(adapter, skb);

	dev_kfree_skb_any(skb);
}

/**
 * pch_gbe_update_stats - Update the board statistics counters
 * @adapter: Board private structure
 */
void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_hw_stats *stats = &adapter->stats;
	unsigned long flags;

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
		return;

	spin_lock_irqsave(&adapter->stats_lock, flags);

	/* Update device status "adapter->stats" */
	stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
	stats->tx_errors = stats->tx_length_errors +
	    stats->tx_aborted_errors +
	    stats->tx_carrier_errors + stats->tx_timeout_count;

	/* Update network device status "adapter->net_stats" */
	netdev->stats.rx_packets = stats->rx_packets;
	netdev->stats.rx_bytes = stats->rx_bytes;
	netdev->stats.rx_dropped = stats->rx_dropped;
	netdev->stats.tx_packets = stats->tx_packets;
	netdev->stats.tx_bytes = stats->tx_bytes;
	netdev->stats.tx_dropped = stats->tx_dropped;
	/* Fill out the OS statistics structure */
	netdev->stats.multicast = stats->multicast;
	netdev->stats.collisions = stats->collisions;
	/* Rx Errors */
	netdev->stats.rx_errors = stats->rx_errors;
	netdev->stats.rx_crc_errors = stats->rx_crc_errors;
	netdev->stats.rx_frame_errors = stats->rx_frame_errors;
	/* Tx Errors */
	netdev->stats.tx_errors = stats->tx_errors;
	netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
	netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}

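/**
 * pch_gbe_disable_dma_rx - Disable the receive DMA unit
 * @hw: Pointer to the HW structure
 */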
static void pch_gbe_disable_dma_rx(struct pch_gbe_hw *hw)
{
	u32 rxdma;

	/* Disables Receive DMA */
	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma &= ~PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);
}

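/**
 * pch_gbe_enable_dma_rx - Enable the receive DMA unit
 * @hw: Pointer to the HW structure
 */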
static void pch_gbe_enable_dma_rx(struct pch_gbe_hw *hw)
{
	u32 rxdma;

	/* Enables Receive DMA */
	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma |= PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);
}

/**
 * pch_gbe_intr - Interrupt Handler
 * @irq:  Interrupt number
 * @data: Pointer to a network interface device structure
 * Returns:
 *	- IRQ_HANDLED: Our interrupt
 *	- IRQ_NONE:    Not our interrupt
 */
static irqreturn_t pch_gbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 int_st;
	u32 int_en;

	/* Check request status */
	int_st = ioread32(&hw->reg->INT_ST);
	int_st = int_st & ioread32(&hw->reg->INT_EN);
	/* When request status is no interruption factor */
	if (unlikely(!int_st))
		return IRQ_NONE;	/* Not our interrupt. End processing. */
	netdev_dbg(netdev, "%s occur int_st = 0x%08x\n", __func__, int_st);
	if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
		adapter->stats.intr_rx_frame_err_count++;
	if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
		if (!adapter->rx_stop_flag) {
			adapter->stats.intr_rx_fifo_err_count++;
			netdev_dbg(netdev, "Rx fifo over run\n");
			adapter->rx_stop_flag = true;
			int_en = ioread32(&hw->reg->INT_EN);
			iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
				  &hw->reg->INT_EN);
			pch_gbe_disable_dma_rx(&adapter->hw);
			int_st |= ioread32(&hw->reg->INT_ST);
			int_st = int_st & ioread32(&hw->reg->INT_EN);
		}
	if (int_st & PCH_GBE_INT_RX_DMA_ERR)
		adapter->stats.intr_rx_dma_err_count++;
	if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
		adapter->stats.intr_tx_fifo_err_count++;
	if (int_st & PCH_GBE_INT_TX_DMA_ERR)
		adapter->stats.intr_tx_dma_err_count++;
	if (int_st & PCH_GBE_INT_TCPIP_ERR)
		adapter->stats.intr_tcpip_err_count++;
	/* When Rx descriptor is empty */
	if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
		adapter->stats.intr_rx_dsc_empty_count++;
		netdev_dbg(netdev, "Rx descriptor is empty\n");
		int_en = ioread32(&hw->reg->INT_EN);
		iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
		if (hw->mac.tx_fc_enable) {
			/* Set Pause packet */
			pch_gbe_mac_set_pause_packet(hw);
		}
	}

	/* When request status is Receive interruption */
	if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
	    (adapter->rx_stop_flag)) {
		if (likely(napi_schedule_prep(&adapter->napi))) {
			/* Enable only Rx Descriptor empty */
			atomic_inc(&adapter->irq_sem);
			int_en = ioread32(&hw->reg->INT_EN);
			int_en &=
			    ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
			iowrite32(int_en, &hw->reg->INT_EN);
			/* Start polling for NAPI */
			__napi_schedule(&adapter->napi);
		}
	}
	netdev_dbg(netdev, "return = 0x%08x  INT_EN reg = 0x%08x\n",
		   IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
	return IRQ_HANDLED;
}

/**
 * pch_gbe_alloc_rx_buffers - Replace used receive buffers
 * @adapter:       Board private structure
 * @rx_ring:       Rx descriptor ring
 * @cleaned_count: Cleaned count
 */
static void
pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
			 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_rx_desc *rx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz;

	bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
	i = rx_ring->next_to_use;

	while ((cleaned_count--)) {
		buffer_info = &rx_ring->buffer_info[i];
		skb = netdev_alloc_skb(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->stats.rx_alloc_buff_failed++;
			break;
		}
		/* align */
		skb_reserve(skb, NET_IP_ALIGN);
		buffer_info->skb = skb;

		buffer_info->dma = dma_map_single(&pdev->dev,
						  buffer_info->rx_buffer,
						  buffer_info->length,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
			buffer_info->dma = 0;
			adapter->stats.rx_alloc_buff_failed++;
			break;
		}
		buffer_info->mapped = true;
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = (buffer_info->dma);
		rx_desc->gbec_status = DSC_INIT16;

		netdev_dbg(netdev,
			   "i = %d  buffer_info->dma = 0x%08llx  buffer_info->length = 0x%x\n",
			   i, (unsigned long long)buffer_info->dma,
			   buffer_info->length);

		if (unlikely(++i == rx_ring->count))
			i = 0;
	}
	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);
		iowrite32(rx_ring->dma +
			  (int)sizeof(struct pch_gbe_rx_desc) * i,
			  &hw->reg->RX_DSC_SW_P);
	}
	return;
}

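/**
 * pch_gbe_alloc_rx_buffers_pool - Allocate the coherent pool backing rx buffers
 * @adapter:       Board private structure
 * @rx_ring:       Rx descriptor ring
 * @cleaned_count: Cleaned count
 */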
static int
pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
			      struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_buffer *buffer_info;
	unsigned int i;
	unsigned int bufsz;
	unsigned int size;

	bufsz = adapter->rx_buffer_len;

	size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
	rx_ring->rx_buff_pool =
		dma_zalloc_coherent(&pdev->dev, size,
				    &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
	if (!rx_ring->rx_buff_pool)
		return -ENOMEM;

	rx_ring->rx_buff_pool_size = size;
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
		buffer_info->length = bufsz;
	}
	return 0;
}

/**
 * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
 * @adapter: Board private structure
 * @tx_ring: Tx descriptor ring
 */
static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
				     struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz;
	struct pch_gbe_tx_desc *tx_desc;

	bufsz =
	    adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		skb = netdev_alloc_skb(adapter->netdev, bufsz);
		skb_reserve(skb, PCH_GBE_DMA_ALIGN);
		buffer_info->skb = skb;
		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
		tx_desc->gbec_status = (DSC_INIT16);
	}
	return;
}

/**
 * pch_gbe_clean_tx - Reclaim resources after transmit completes
 * @adapter: Board private structure
 * @tx_ring: Tx descriptor ring
 * Returns:
 *	true:  Cleaned the descriptor
 *	false: Not cleaned the descriptor
 */
static bool
pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
		 struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_tx_desc *tx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int cleaned_count = 0;
	bool cleaned = false;
	int unused, thresh;

	netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
		   tx_ring->next_to_clean);

	i = tx_ring->next_to_clean;
	tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
	netdev_dbg(adapter->netdev, "gbec_status:0x%04x  dma_status:0x%04x\n",
		   tx_desc->gbec_status, tx_desc->dma_status);

	unused = PCH_GBE_DESC_UNUSED(tx_ring);
	thresh = tx_ring->count - PCH_GBE_TX_WEIGHT;
	if ((tx_desc->gbec_status == DSC_INIT16) && (unused < thresh)) {
		/* The queue is filling up but next_to_clean still points at
		 * an unsent descriptor, so search ahead for the first one
		 * the hardware has completed.
		 */
		int j, k;
		if (unused < 8) {  /* tx_ring is nearly full */
			netdev_dbg(adapter->netdev,
				   "clean_tx: transmit queue warning (%x,%x) unused=%d\n",
				   tx_ring->next_to_clean, tx_ring->next_to_use,
				   unused);
		}

		/* current marked clean, tx queue filling up, do extra clean */
		k = i;
		for (j = 0; j < PCH_GBE_TX_WEIGHT; j++) {
			tx_desc = PCH_GBE_TX_DESC(*tx_ring, k);
			if (tx_desc->gbec_status != DSC_INIT16)
				break; /* found */
			if (++k >= tx_ring->count)
				k = 0;	/* reached end of ring */
		}
		if (j < PCH_GBE_TX_WEIGHT) {
			netdev_dbg(adapter->netdev,
				   "clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n",
				   unused, j, i, k, tx_ring->next_to_use,
				   tx_desc->gbec_status);
			i = k;	/* found one to clean */
		}
	}

	while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
		netdev_dbg(adapter->netdev, "gbec_status:0x%04x\n",
			   tx_desc->gbec_status);
		buffer_info = &tx_ring->buffer_info[i];
		skb = buffer_info->skb;
		cleaned = true;

		if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
			adapter->stats.tx_aborted_errors++;
			netdev_err(adapter->netdev, "Transfer Abort Error\n");
		} else if ((tx_desc->gbec_status &
			    PCH_GBE_TXD_GMAC_STAT_CRSER)) {
			adapter->stats.tx_carrier_errors++;
			netdev_err(adapter->netdev,
				   "Transfer Carrier Sense Error\n");
		} else if ((tx_desc->gbec_status &
			    PCH_GBE_TXD_GMAC_STAT_EXCOL)) {
			adapter->stats.tx_aborted_errors++;
			netdev_err(adapter->netdev,
				   "Transfer Collision Abort Error\n");
		} else if ((tx_desc->gbec_status &
			    (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
			     PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
			adapter->stats.collisions++;
			adapter->stats.tx_packets++;
			adapter->stats.tx_bytes += skb->len;
			netdev_dbg(adapter->netdev, "Transfer Collision\n");
		} else if ((tx_desc->gbec_status &
			    PCH_GBE_TXD_GMAC_STAT_CMPLT)) {
			adapter->stats.tx_packets++;
			adapter->stats.tx_bytes += skb->len;
		}
		if (buffer_info->mapped) {
			netdev_dbg(adapter->netdev,
				   "unmap buffer_info->dma : %d\n", i);
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
			buffer_info->mapped = false;
		}
		if (buffer_info->skb) {
			netdev_dbg(adapter->netdev,
				   "trim buffer_info->skb : %d\n", i);
			skb_trim(buffer_info->skb, 0);
		}
		tx_desc->gbec_status = DSC_INIT16;
		if (unlikely(++i == tx_ring->count))
			i = 0;
		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);

		/* weight of a sort for tx, to avoid endless transmit cleanup */
		if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
			cleaned = false;
			break;
		}
	}
	netdev_dbg(adapter->netdev,
		   "called pch_gbe_unmap_and_free_tx_resource() %d count\n",
		   cleaned_count);
	if (cleaned_count > 0) {	/* Found a dirty descriptor? */
		/* Recover from running out of Tx resources in xmit_frame */
		netif_tx_lock(adapter->netdev);
		if (unlikely(cleaned &&
			     (netif_queue_stopped(adapter->netdev)))) {
			netif_wake_queue(adapter->netdev);
			adapter->stats.tx_restart_count++;
			netdev_dbg(adapter->netdev, "Tx wake queue\n");
		}

		tx_ring->next_to_clean = i;

		netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
			   tx_ring->next_to_clean);
		netif_tx_unlock(adapter->netdev);
	}
	return cleaned;
}

/**
 * pch_gbe_clean_rx - Send received data up the network stack
 * @adapter:    Board private structure
 * @rx_ring:    Rx descriptor ring
 * @work_done:  Completed count
 * @work_to_do: Request count
 * Returns:
 *	true:  Cleaned the descriptor
 *	false: Not cleaned the descriptor
 */
static bool
pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
		 struct pch_gbe_rx_ring *rx_ring,
		 int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_buffer *buffer_info;
	struct pch_gbe_rx_desc *rx_desc;
	u32 length;
	unsigned int i;
	unsigned int cleaned_count = 0;
	bool cleaned = false;
	struct sk_buff *skb;
	u8 dma_status;
	u16 gbec_status;
	u32 tcp_ip_status;

	i = rx_ring->next_to_clean;

	while (*work_done < work_to_do) {
		/* Check Rx descriptor status */
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
		if (rx_desc->gbec_status == DSC_INIT16)
			break;
		cleaned = true;
		cleaned_count++;

		dma_status = rx_desc->dma_status;
		gbec_status = rx_desc->gbec_status;
		tcp_ip_status = rx_desc->tcp_ip_status;
		rx_desc->gbec_status = DSC_INIT16;
		buffer_info = &rx_ring->buffer_info[i];
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		/* unmap dma */
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->mapped = false;

		netdev_dbg(netdev,
			   "RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x TCP:0x%08x]  BufInf = 0x%p\n",
			   i, dma_status, gbec_status, tcp_ip_status,
			   buffer_info);
		/* Error check */
		if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
			adapter->stats.rx_frame_errors++;
			netdev_err(netdev, "Receive Not Octal Error\n");
		} else if (unlikely(gbec_status &
				    PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
			adapter->stats.rx_frame_errors++;
			netdev_err(netdev, "Receive Nibble Error\n");
		} else if (unlikely(gbec_status &
				    PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
			adapter->stats.rx_crc_errors++;
			netdev_err(netdev, "Receive CRC Error\n");
		} else {
			/* get receive length */
			/* length convert[-3], length includes FCS length */
			length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
			if (rx_desc->rx_words_eob & 0x02)
				length = length - 4;
			/*
			 * buffer_info->rx_buffer: [Header:14][payload]
			 * skb->data: [Reserve:2][Header:14][payload]
			 */
			memcpy(skb->data, buffer_info->rx_buffer, length);

			/* update status of driver */
			adapter->stats.rx_bytes += length;
			adapter->stats.rx_packets++;
			if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
				adapter->stats.multicast++;
			/* Write metadata of skb */
			skb_put(skb, length);

			pch_rx_timestamp(adapter, skb);

			skb->protocol = eth_type_trans(skb, netdev);
			if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				skb->ip_summed = CHECKSUM_NONE;

			napi_gro_receive(&adapter->napi, skb);
			(*work_done)++;
			netdev_dbg(netdev,
				   "Receive skb->ip_summed: %d length: %d\n",
				   skb->ip_summed, length);
		}
		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
			pch_gbe_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}
		if (++i == rx_ring->count)
			i = 0;
	}
	rx_ring->next_to_clean = i;
	if (cleaned_count)
		pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
	return cleaned;
}

/**
 * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
 * @adapter: Board private structure
 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
 * Returns:
 *	0:      Successfully
 *	Negative value: Failed
 */
int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
			       struct pch_gbe_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_tx_desc *tx_desc;
	int size;
	int desNo;

	size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info)
		return -ENOMEM;

	tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);

	tx_ring->desc = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
					    &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		vfree(tx_ring->buffer_info);
		return -ENOMEM;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	for (desNo = 0; desNo < tx_ring->count; desNo++) {
		tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
		tx_desc->gbec_status = DSC_INIT16;
	}
	netdev_dbg(adapter->netdev,
		   "tx_ring->desc = 0x%p  tx_ring->dma = 0x%08llx  next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
		   tx_ring->desc, (unsigned long long)tx_ring->dma,
		   tx_ring->next_to_clean, tx_ring->next_to_use);
	return 0;
}

/**
 * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
 * @adapter: Board private structure
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 * Returns:
 *	0:      Successfully
 *	Negative value: Failed
 */
int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
			       struct pch_gbe_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_rx_desc *rx_desc;
	int size;
	int desNo;

	size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		return -ENOMEM;

	rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
	rx_ring->desc = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
					    &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc) {
		vfree(rx_ring->buffer_info);
		return -ENOMEM;
	}
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	for (desNo = 0; desNo < rx_ring->count; desNo++) {
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
		rx_desc->gbec_status = DSC_INIT16;
	}
	netdev_dbg(adapter->netdev,
		   "rx_ring->desc = 0x%p  rx_ring->dma = 0x%08llx  next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
		   rx_ring->desc, (unsigned long long)rx_ring->dma,
		   rx_ring->next_to_clean, rx_ring->next_to_use);
	return 0;
}

/**
 * pch_gbe_free_tx_resources - Free Tx Resources
 * @adapter: Board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 */
void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
			       struct pch_gbe_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	pch_gbe_clean_tx_ring(adapter, tx_ring);
	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;
	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
	tx_ring->desc = NULL;
}

/**
 * pch_gbe_free_rx_resources - Free Rx Resources
 * @adapter: Board private structure
 * @rx_ring: Rx descriptor ring for a specific queue
 */
void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
			       struct pch_gbe_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	pch_gbe_clean_rx_ring(adapter, rx_ring);
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
	rx_ring->desc = NULL;
}

/**
 * pch_gbe_request_irq - Allocate an interrupt line
 * @adapter: Board private structure
 * Returns:
 *	0:      Successfully
 *	Negative value: Failed
 */
static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	err = pci_alloc_irq_vectors(adapter->pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (err < 0)
		return err;

	adapter->irq = pci_irq_vector(adapter->pdev, 0);

	err = request_irq(adapter->irq, &pch_gbe_intr, IRQF_SHARED,
			  netdev->name, netdev);
	if (err)
		netdev_err(netdev, "Unable to allocate interrupt Error: %d\n",
			   err);
	netdev_dbg(netdev, "have_msi : %d  return : 0x%04x\n",
		   pci_dev_msi_enabled(adapter->pdev), err);
	return err;
}

/**
 * pch_gbe_up - Up GbE network device
 * @adapter: Board private structure
 * Returns:
 *	0:      Successfully
 *	Negative value: Failed
 */
int pch_gbe_up(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
	int err = -EINVAL;

	/* Ensure we have a valid MAC */
	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
		netdev_err(netdev, "Error: Invalid MAC address\n");
		goto out;
	}

	/* hardware has been reset, we need to reload some things */
	pch_gbe_set_multi(netdev);

	pch_gbe_setup_tctl(adapter);
	pch_gbe_configure_tx(adapter);
	pch_gbe_setup_rctl(adapter);
	pch_gbe_configure_rx(adapter);

	err = pch_gbe_request_irq(adapter);
	if (err) {
		netdev_err(netdev,
			   "Error: can't bring device up - irq request failed\n");
		goto out;
	}
	err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
	if (err) {
		netdev_err(netdev,
			   "Error: can't bring device up - alloc rx buffers pool failed\n");
		goto freeirq;
	}
	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
	adapter->tx_queue_len = netdev->tx_queue_len;
	pch_gbe_enable_dma_rx(&adapter->hw);
	pch_gbe_enable_mac_rx(&adapter->hw);

	mod_timer(&adapter->watchdog_timer, jiffies);

	napi_enable(&adapter->napi);
	pch_gbe_irq_enable(adapter);
	netif_start_queue(adapter->netdev);

	return 0;

freeirq:
	pch_gbe_free_irq(adapter);
out:
	return err;
}

/**
 * pch_gbe_down - Down GbE network device
 * @adapter: Board private structure
 */
void pch_gbe_down(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	napi_disable(&adapter->napi);
	atomic_set(&adapter->irq_sem, 0);

	pch_gbe_irq_disable(adapter);
	pch_gbe_free_irq(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
		pch_gbe_reset(adapter);
	pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
	pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);

	pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
			    rx_ring->rx_buff_pool,
			    rx_ring->rx_buff_pool_logic);
	rx_ring->rx_buff_pool_logic = 0;
	rx_ring->rx_buff_pool_size = 0;
	rx_ring->rx_buff_pool = NULL;
}

/**
 * pch_gbe_sw_init - Initialize general software structures
 * @adapter: Board private structure to initialize
 * Returns:
 *	0:      Successfully
 *	Negative value: Failed
 */
static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
	hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	/* Initialize the hardware-specific values */
	if (pch_gbe_hal_setup_init_funcs(hw)) {
		netdev_err(netdev, "Hardware Initialization Failure\n");
		return -EIO;
	}
	if (pch_gbe_alloc_queues(adapter)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}
	spin_lock_init(&adapter->hw.miim_lock);
	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->ethtool_lock);
	atomic_set(&adapter->irq_sem, 0);
	pch_gbe_irq_disable(adapter);

	pch_gbe_init_stats(adapter);

	netdev_dbg(netdev,
		   "rx_buffer_len : %d  mac.min_frame_size : %d  mac.max_frame_size : %d\n",
		   (u32) adapter->rx_buffer_len,
		   hw->mac.min_frame_size, hw->mac.max_frame_size);
	return 0;
}

/**
 * pch_gbe_open - Called when a network interface is made active
 * @netdev: Network interface device structure
 * Returns:
 *	0:      Successfully
 *	Negative value: Failed
 */
static int pch_gbe_open(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	int err;

	/* allocate transmit descriptors */
	err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;
	/* allocate receive descriptors */
	err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;
	pch_gbe_hal_power_up_phy(hw);
	err = pch_gbe_up(adapter);
	if (err)
		goto err_up;
	netdev_dbg(netdev, "Success End\n");
	return 0;

err_up:
	if (!adapter->wake_up_evt)
		pch_gbe_hal_power_down_phy(hw);
	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
err_setup_rx:
	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
err_setup_tx:
	pch_gbe_reset(adapter);
	netdev_err(netdev, "Error End\n");
	return err;
}

/**
 * pch_gbe_stop - Disables a network interface
 * @netdev: Network interface device structure
 * Returns:
 *	0: Successfully
 */
static int pch_gbe_stop(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	pch_gbe_down(adapter);
	if (!adapter->wake_up_evt)
		pch_gbe_hal_power_down_phy(hw);
	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
	return 0;
}

/**
 * pch_gbe_xmit_frame - Packet transmitting start
 * @skb:    Socket buffer structure
 * @netdev: Network interface device structure
 * Returns:
 *	- NETDEV_TX_OK:   Normal end
 *	- NETDEV_TX_BUSY: Error end
 */
static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;

	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
		netif_stop_queue(netdev);
		netdev_dbg(netdev,
			   "Return : BUSY  next_to_use : 0x%08x  next_to_clean : 0x%08x\n",
			   tx_ring->next_to_use, tx_ring->next_to_clean);
		return NETDEV_TX_BUSY;
	}

	/* CRC,ITAG no support */
	pch_gbe_tx_queue(adapter, tx_ring, skb);
	return NETDEV_TX_OK;
}

/**
 * pch_gbe_set_multi - Multicast and Promiscuous mode set
 * @netdev: Network interface device structure
 */
static void pch_gbe_set_multi(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	u32 rctl;
	int i;
	int mc_count;

	netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags);

	/* Check for Promiscuous and All Multicast modes */
	rctl = ioread32(&hw->reg->RX_MODE);
	mc_count = netdev_mc_count(netdev);
	if ((netdev->flags & IFF_PROMISC)) {
		rctl &= ~PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else if ((netdev->flags & IFF_ALLMULTI)) {
		/* all the multicasting receive permissions */
		rctl |= PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else {
		if (mc_count >= PCH_GBE_MAR_ENTRIES) {
			/* all the multicasting receive permissions */
			rctl |= PCH_GBE_ADD_FIL_EN;
			rctl &= ~PCH_GBE_MLT_FIL_EN;
		} else {
			rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
		}
	}
	iowrite32(rctl, &hw->reg->RX_MODE);

	if (mc_count >= PCH_GBE_MAR_ENTRIES)
		return;
	mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == mc_count)
			break;
		memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
	}
	pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
					PCH_GBE_MAR_ENTRIES);
	kfree(mta_list);

	netdev_dbg(netdev,
		   "RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x  netdev->mc_count : 0x%08x\n",
		   ioread32(&hw->reg->RX_MODE), mc_count);
}

/**
 * pch_gbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: Network interface device structure
 * @addr:   Pointer to an address structure
 * Returns:
 *	0:              Successfully
 *	-EADDRNOTAVAIL: Failed
 */
static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *skaddr = addr;
	int ret_val;

	if (!is_valid_ether_addr(skaddr->sa_data)) {
		ret_val = -EADDRNOTAVAIL;
	} else {
		memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
		memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
		pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
		ret_val = 0;
	}
	netdev_dbg(netdev, "ret_val : 0x%08x\n", ret_val);
	netdev_dbg(netdev, "dev_addr : %pM\n", netdev->dev_addr);
	netdev_dbg(netdev, "mac_addr : %pM\n", adapter->hw.mac.addr);
	netdev_dbg(netdev, "MAC_ADR1AB reg : 0x%08x 0x%08x\n",
		   ioread32(&adapter->hw.reg->mac_adr[0].high),
		   ioread32(&adapter->hw.reg->mac_adr[0].low));
	return ret_val;
}
2231
2232
2233
2234
2235
2236
2237
2238
2239
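/**
 * pch_gbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev:   Pointer to the network interface device structure
 * @new_mtu:  New value for the MTU
 * Returns: 0 on success, a negative errno if the interface could not be
 *	brought back up with the new receive buffer size
 */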
static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
	int err;

	if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
	else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
	else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
	else
		adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;

	if (netif_running(netdev)) {
		pch_gbe_down(adapter);
		err = pch_gbe_up(adapter);
		if (err) {
			/* restart with the old buffer size on failure */
			adapter->rx_buffer_len = old_rx_buffer_len;
			pch_gbe_up(adapter);
			return err;
		} else {
			netdev->mtu = new_mtu;
			adapter->hw.mac.max_frame_size = max_frame;
		}
	} else {
		pch_gbe_reset(adapter);
		netdev->mtu = new_mtu;
		adapter->hw.mac.max_frame_size = max_frame;
	}

	netdev_dbg(netdev,
		   "max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
		   max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
		   adapter->hw.mac.max_frame_size);
	return 0;
}

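/**
 * pch_gbe_set_features - Reset the device when the RX checksum offload
 *			  setting changes
 * @netdev:    Pointer to the network interface device structure
 * @features:  New feature set
 * Returns: 0 (always succeeds)
 */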
static int pch_gbe_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	if (netif_running(netdev))
		pch_gbe_reinit_locked(adapter);
	else
		pch_gbe_reset(adapter);

	return 0;
}

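/**
 * pch_gbe_ioctl - Handle ioctl calls, dispatching SIOCSHWTSTAMP to the
 *		   hardware timestamp handler and everything else to the
 *		   generic MII layer
 * @netdev:  Pointer to the network interface device structure
 * @ifr:     Pointer to the ioctl request structure
 * @cmd:     ioctl command
 * Returns: 0 on success, a negative errno on failure
 */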
static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "cmd : 0x%04x\n", cmd);

	if (cmd == SIOCSHWTSTAMP)
		return hwtstamp_ioctl(netdev, ifr, cmd);

	return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
}

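/**
 * pch_gbe_tx_timeout - Respond to a Tx hang by scheduling a reset
 * @netdev:  Pointer to the network interface device structure
 */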
static void pch_gbe_tx_timeout(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->stats.tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

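/**
 * pch_gbe_napi_poll - NAPI receive and transmit polling callback
 * @napi:    Pointer to the NAPI structure
 * @budget:  Maximum number of receive packets to process
 * Returns: the amount of work done, capped at @budget; NAPI leaves
 *	polling mode once this is less than @budget
 */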
static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
{
	struct pch_gbe_adapter *adapter =
		container_of(napi, struct pch_gbe_adapter, napi);
	int work_done = 0;
	bool poll_end_flag = false;
	bool cleaned = false;

	netdev_dbg(adapter->netdev, "budget : %d\n", budget);

	pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
	cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);

	/* if Tx descriptors were cleaned, claim the full budget so that
	 * NAPI polls again
	 */
	if (cleaned)
		work_done = budget;

	/* if no Tx was cleaned and not enough Rx work was done,
	 * exit polling mode
	 */
	if (work_done < budget)
		poll_end_flag = true;

	if (poll_end_flag) {
		napi_complete_done(napi, work_done);
		pch_gbe_irq_enable(adapter);
	}

	if (adapter->rx_stop_flag) {
		adapter->rx_stop_flag = false;
		pch_gbe_enable_dma_rx(&adapter->hw);
	}

	netdev_dbg(adapter->netdev,
		   "poll_end_flag : %d work_done : %d budget : %d\n",
		   poll_end_flag, work_done, budget);

	return work_done;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
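/**
 * pch_gbe_netpoll - Poll the device with interrupts disabled, used by
 *		     netconsole and other CONFIG_NET_POLL_CONTROLLER users
 * @netdev:  Pointer to the network interface device structure
 */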
static void pch_gbe_netpoll(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->irq);
	pch_gbe_intr(adapter->irq, netdev);
	enable_irq(adapter->irq);
}
#endif

static const struct net_device_ops pch_gbe_netdev_ops = {
	.ndo_open = pch_gbe_open,
	.ndo_stop = pch_gbe_stop,
	.ndo_start_xmit = pch_gbe_xmit_frame,
	.ndo_set_mac_address = pch_gbe_set_mac,
	.ndo_tx_timeout = pch_gbe_tx_timeout,
	.ndo_change_mtu = pch_gbe_change_mtu,
	.ndo_set_features = pch_gbe_set_features,
	.ndo_do_ioctl = pch_gbe_ioctl,
	.ndo_set_rx_mode = pch_gbe_set_multi,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = pch_gbe_netpoll,
#endif
};

static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);
	if (netif_running(netdev))
		pch_gbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);
	pch_gbe_hal_power_up_phy(hw);
	pch_gbe_reset(adapter);
	/* Clear wake up status */
	pch_gbe_mac_set_wol_event(hw, 0);

	return PCI_ERS_RESULT_RECOVERED;
}

static void pch_gbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (pch_gbe_up(adapter)) {
			netdev_dbg(netdev,
				   "can't bring device back up after reset\n");
			return;
		}
	}
	netif_device_attach(netdev);
}

static int __pch_gbe_suspend(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 wufc = adapter->wake_up_evt;
	int retval = 0;

	netif_device_detach(netdev);
	if (netif_running(netdev))
		pch_gbe_down(adapter);
	if (wufc) {
		pch_gbe_set_multi(netdev);
		pch_gbe_setup_rctl(adapter);
		pch_gbe_configure_rx(adapter);
		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
				       hw->mac.link_duplex);
		pch_gbe_set_mode(adapter, hw->mac.link_speed,
				 hw->mac.link_duplex);
		pch_gbe_mac_set_wol_event(hw, wufc);
		pci_disable_device(pdev);
	} else {
		pch_gbe_hal_power_down_phy(hw);
		pch_gbe_mac_set_wol_event(hw, wufc);
		pci_disable_device(pdev);
	}
	return retval;
}

#ifdef CONFIG_PM
static int pch_gbe_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	return __pch_gbe_suspend(pdev);
}

static int pch_gbe_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		netdev_err(netdev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);
	pch_gbe_hal_power_up_phy(hw);
	pch_gbe_reset(adapter);
	/* Clear wake on lan control and status */
	pch_gbe_mac_set_wol_event(hw, 0);

	if (netif_running(netdev))
		pch_gbe_up(adapter);
	netif_device_attach(netdev);

	return 0;
}
#endif /* CONFIG_PM */

static void pch_gbe_shutdown(struct pci_dev *pdev)
{
	__pch_gbe_suspend(pdev);
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

static void pch_gbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	cancel_work_sync(&adapter->reset_task);
	unregister_netdev(netdev);

	pch_gbe_hal_phy_hw_reset(&adapter->hw);

	free_netdev(netdev);
}

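/**
 * pch_gbe_probe - Device initialization routine
 * @pdev:    PCI device information structure
 * @pci_id:  Matching entry in pch_gbe_pcidev_id
 * Returns: 0 on success, a negative errno on failure
 */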
static int pch_gbe_probe(struct pci_dev *pdev,
			 const struct pci_device_id *pci_id)
{
	struct net_device *netdev;
	struct pch_gbe_adapter *adapter;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret) {
			ret = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
			if (ret) {
				dev_err(&pdev->dev,
					"ERR: No usable DMA configuration, aborting\n");
				return ret;
			}
		}
	}

	ret = pcim_iomap_regions(pdev, 1 << PCH_GBE_PCI_BAR, pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev,
			"ERR: Can't reserve PCI I/O and memory resources\n");
		return ret;
	}
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct pch_gbe_adapter));
	if (!netdev)
		return -ENOMEM;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR];
	adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data;
	if (adapter->pdata && adapter->pdata->platform_init)
		adapter->pdata->platform_init(pdev);

	adapter->ptp_pdev =
		pci_get_domain_bus_and_slot(pci_domain_nr(adapter->pdev->bus),
					    adapter->pdev->bus->number,
					    PCI_DEVFN(12, 4));

	netdev->netdev_ops = &pch_gbe_netdev_ops;
	netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
	netif_napi_add(netdev, &adapter->napi,
		       pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
	netdev->hw_features = NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	netdev->features = netdev->hw_features;
	pch_gbe_set_ethtool_ops(netdev);

	/* MTU range: 46 - 10300 */
	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
	netdev->max_mtu = PCH_GBE_MAX_JUMBO_FRAME_SIZE -
			  (ETH_HLEN + ETH_FCS_LEN);

	pch_gbe_mac_load_mac_addr(&adapter->hw);
	pch_gbe_mac_reset_hw(&adapter->hw);

	/* setup the private structure */
	ret = pch_gbe_sw_init(adapter);
	if (ret)
		goto err_free_netdev;

	/* Initialize PHY */
	ret = pch_gbe_init_phy(adapter);
	if (ret) {
		dev_err(&pdev->dev, "PHY initialize error\n");
		goto err_free_adapter;
	}
	pch_gbe_hal_get_bus_info(&adapter->hw);

	/* Read the MAC address and store it in the private structure */
	ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
	if (ret) {
		dev_err(&pdev->dev, "MAC address Read Error\n");
		goto err_free_adapter;
	}

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		/* If the MAC is invalid (or just missing), warn and leave
		 * the interface down; a valid address can still be set
		 * later through pch_gbe_set_mac().
		 */
		dev_err(&pdev->dev, "Invalid MAC address, interface disabled.\n");
	}
	timer_setup(&adapter->watchdog_timer, pch_gbe_watchdog, 0);

	INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);

	pch_gbe_check_options(adapter);

	/* initialize the wake-on-LAN settings */
	adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
	dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);

	/* reset the hardware with the new settings */
	pch_gbe_reset(adapter);

	ret = register_netdev(netdev);
	if (ret)
		goto err_free_adapter;
	/* tell the stack to leave us alone until pch_gbe_open() is called */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	dev_dbg(&pdev->dev, "PCH Network Connection\n");

	/* Disable hibernation on certain platforms */
	if (adapter->pdata && adapter->pdata->phy_disable_hibernate)
		pch_gbe_phy_disable_hibernate(&adapter->hw);

	device_set_wakeup_enable(&pdev->dev, 1);
	return 0;

err_free_adapter:
	pch_gbe_hal_phy_hw_reset(&adapter->hw);
err_free_netdev:
	free_netdev(netdev);
	return ret;
}

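/* Pulse the PHY reset line on the MinnowBoard so the PHY is out of
 * reset and awake before the driver probes it.
 */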
static int pch_gbe_minnow_platform_init(struct pci_dev *pdev)
{
	unsigned long flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH | GPIOF_EXPORT;
	unsigned int gpio = MINNOW_PHY_RESET_GPIO;
	int ret;

	ret = devm_gpio_request_one(&pdev->dev, gpio, flags,
				    "minnow_phy_reset");
	if (ret) {
		dev_err(&pdev->dev,
			"ERR: Can't request PHY reset GPIO line '%d'\n", gpio);
		return ret;
	}

	/* drive the reset line low, then high again, leaving the PHY
	 * time to come out of reset
	 */
	gpio_set_value(gpio, 0);
	usleep_range(1250, 1500);
	gpio_set_value(gpio, 1);
	usleep_range(1250, 1500);

	return ret;
}

static struct pch_gbe_privdata pch_gbe_minnow_privdata = {
	.phy_tx_clk_delay = true,
	.phy_disable_hibernate = true,
	.platform_init = pch_gbe_minnow_platform_init,
};

static const struct pci_device_id pch_gbe_pcidev_id[] = {
	{.vendor = PCI_VENDOR_ID_INTEL,
	 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
	 .subvendor = PCI_VENDOR_ID_CIRCUITCO,
	 .subdevice = PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00),
	 .driver_data = (kernel_ulong_t)&pch_gbe_minnow_privdata
	 },
	{.vendor = PCI_VENDOR_ID_INTEL,
	 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	{.vendor = PCI_VENDOR_ID_ROHM,
	 .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	{.vendor = PCI_VENDOR_ID_ROHM,
	 .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	/* required last entry */
	{0}
};

#ifdef CONFIG_PM
static const struct dev_pm_ops pch_gbe_pm_ops = {
	.suspend = pch_gbe_suspend,
	.resume = pch_gbe_resume,
	.freeze = pch_gbe_suspend,
	.thaw = pch_gbe_resume,
	.poweroff = pch_gbe_suspend,
	.restore = pch_gbe_resume,
};
#endif

static const struct pci_error_handlers pch_gbe_err_handler = {
	.error_detected = pch_gbe_io_error_detected,
	.slot_reset = pch_gbe_io_slot_reset,
	.resume = pch_gbe_io_resume
};

static struct pci_driver pch_gbe_driver = {
	.name = KBUILD_MODNAME,
	.id_table = pch_gbe_pcidev_id,
	.probe = pch_gbe_probe,
	.remove = pch_gbe_remove,
#ifdef CONFIG_PM
	.driver.pm = &pch_gbe_pm_ops,
#endif
	.shutdown = pch_gbe_shutdown,
	.err_handler = &pch_gbe_err_handler
};

static int __init pch_gbe_init_module(void)
{
	int ret;

	pr_info("EG20T PCH Gigabit Ethernet Driver - version %s\n",
		DRV_VERSION);
	ret = pci_register_driver(&pch_gbe_driver);
	if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
		if (copybreak == 0) {
			pr_info("copybreak disabled\n");
		} else {
			pr_info("copybreak enabled for packets <= %u bytes\n",
				copybreak);
		}
	}
	return ret;
}

static void __exit pch_gbe_exit_module(void)
{
	pci_unregister_driver(&pch_gbe_driver);
}

module_init(pch_gbe_init_module);
module_exit(pch_gbe_exit_module);

MODULE_DESCRIPTION("EG20T PCH Gigabit Ethernet Driver");
MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);

module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");