// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1999 - 2010 Intel Corporation.
 * Copyright (C) 2010 - 2012 LAPIS SEMICONDUCTOR CO., LTD.
 *
 * This code was derived from the Intel e1000e Linux driver.
 */
9#include "pch_gbe.h"
10#include "pch_gbe_phy.h"
11
12#include <linux/gpio/consumer.h>
13#include <linux/gpio/machine.h>
14#include <linux/iopoll.h>
15#include <linux/module.h>
16#include <linux/net_tstamp.h>
17#include <linux/ptp_classify.h>
18#include <linux/ptp_pch.h>
19#include <linux/gpio.h>
20
21#define PCH_GBE_MAR_ENTRIES 16
22#define PCH_GBE_SHORT_PKT 64
23#define DSC_INIT16 0xC000
24#define PCH_GBE_DMA_ALIGN 0
25#define PCH_GBE_DMA_PADDING 2
26#define PCH_GBE_WATCHDOG_PERIOD (5 * HZ)
27#define PCH_GBE_PCI_BAR 1
28#define PCH_GBE_RESERVE_MEMORY 0x200000
29
30#define PCI_DEVICE_ID_INTEL_IOH1_GBE 0x8802
31
32#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
33#define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802
34
35#define PCH_GBE_RX_BUFFER_WRITE 16
36
37
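/* Initialize the wake-on-LAN settings */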
38#define PCH_GBE_WL_INIT_SETTING (PCH_GBE_WLC_MP)
39
40#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
41 PCH_GBE_CHIP_TYPE_INTERNAL | \
42 PCH_GBE_RGMII_MODE_RGMII \
43 )
44
45
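/* Receive buffer and frame size limits */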
46#define PCH_GBE_MAX_RX_BUFFER_SIZE 0x2880
47#define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318
48#define PCH_GBE_FRAME_SIZE_2048 2048
49#define PCH_GBE_FRAME_SIZE_4096 4096
50#define PCH_GBE_FRAME_SIZE_8192 8192
51
52#define PCH_GBE_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
53#define PCH_GBE_RX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
54#define PCH_GBE_TX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
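/* Number of free descriptors in ring R; one slot is always kept empty to
 * distinguish a full ring from an empty one. Example: with count = 8,
 * next_to_use = 5 and next_to_clean = 2 there are 8 + 2 - 5 - 1 = 4
 * unused descriptors.
 */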
55#define PCH_GBE_DESC_UNUSED(R) \
56 ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
57 (R)->next_to_clean - (R)->next_to_use - 1)
58
59
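/* Pause packet value */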
60#define PCH_GBE_PAUSE_PKT1_VALUE 0x00C28001
61#define PCH_GBE_PAUSE_PKT2_VALUE 0x00000100
62#define PCH_GBE_PAUSE_PKT4_VALUE 0x01000888
63#define PCH_GBE_PAUSE_PKT5_VALUE 0x0000FFFF
73
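/* Interrupt events enabled by default: Rx DMA complete, Rx descriptor
 * empty, Rx FIFO error, wake-on-LAN detect and Tx complete.
 */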
74#define PCH_GBE_INT_ENABLE_MASK ( \
75 PCH_GBE_INT_RX_DMA_CMPLT | \
76 PCH_GBE_INT_RX_DSC_EMP | \
77 PCH_GBE_INT_RX_FIFO_ERR | \
78 PCH_GBE_INT_WOL_DET | \
79 PCH_GBE_INT_TX_CMPLT \
80 )
81
82#define PCH_GBE_INT_DISABLE_ALL 0
85
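/* Macros for ieee1588 */
/* 0x40 Time Synchronization Channel Control Register Bits */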
86#define MASTER_MODE (1<<0)
87#define SLAVE_MODE (0)
88#define V2_MODE (1<<31)
89#define CAP_MODE0 (0)
90#define CAP_MODE2 (1<<17)
91
92
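/* 0x44 Time Synchronization Channel Event Register Bits */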
93#define TX_SNAPSHOT_LOCKED (1<<0)
94#define RX_SNAPSHOT_LOCKED (1<<1)
95
96#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
97#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
98
99static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
100static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
101 int data);
102static void pch_gbe_set_multi(struct net_device *netdev);
103
104static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
105{
106 u8 *data = skb->data;
107 unsigned int offset;
108 u16 hi, id;
109 u32 lo;
110
111 if (ptp_classify_raw(skb) == PTP_CLASS_NONE)
112 return 0;
113
114 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
115
116 if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
117 return 0;
118
119 hi = get_unaligned_be16(data + offset + OFF_PTP_SOURCE_UUID + 0);
120 lo = get_unaligned_be32(data + offset + OFF_PTP_SOURCE_UUID + 2);
121 id = get_unaligned_be16(data + offset + OFF_PTP_SEQUENCE_ID);
122
123 return (uid_hi == hi && uid_lo == lo && seqid == id);
124}
125
126static void
127pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
128{
129 struct skb_shared_hwtstamps *shhwtstamps;
130 struct pci_dev *pdev;
131 u64 ns;
132 u32 hi, lo, val;
133
134 if (!adapter->hwts_rx_en)
135 return;
136
137
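	/* Get ieee1588's dev information */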
138 pdev = adapter->ptp_pdev;
139
140 val = pch_ch_event_read(pdev);
141
142 if (!(val & RX_SNAPSHOT_LOCKED))
143 return;
144
145 lo = pch_src_uuid_lo_read(pdev);
146 hi = pch_src_uuid_hi_read(pdev);
147
148 if (!pch_ptp_match(skb, hi, lo, hi >> 16))
149 goto out;
150
151 ns = pch_rx_snap_read(pdev);
152
153 shhwtstamps = skb_hwtstamps(skb);
154 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
155 shhwtstamps->hwtstamp = ns_to_ktime(ns);
156out:
157 pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
158}
159
160static void
161pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
162{
163 struct skb_shared_hwtstamps shhwtstamps;
164 struct pci_dev *pdev;
165 struct skb_shared_info *shtx;
166 u64 ns;
167 u32 cnt, val;
168
169 shtx = skb_shinfo(skb);
170 if (likely(!(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)))
171 return;
172
173 shtx->tx_flags |= SKBTX_IN_PROGRESS;
174
175
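	/* Get ieee1588's dev information */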
176 pdev = adapter->ptp_pdev;
180
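	/* Poll for the Tx time stamp; it is usually ready after
	 * a few microseconds.
	 */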
181 for (cnt = 0; cnt < 100; cnt++) {
182 val = pch_ch_event_read(pdev);
183 if (val & TX_SNAPSHOT_LOCKED)
184 break;
185 udelay(1);
186 }
187 if (!(val & TX_SNAPSHOT_LOCKED)) {
188 shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
189 return;
190 }
191
192 ns = pch_tx_snap_read(pdev);
193
194 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
195 shhwtstamps.hwtstamp = ns_to_ktime(ns);
196 skb_tstamp_tx(skb, &shhwtstamps);
197
198 pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED);
199}
200
201static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
202{
203 struct hwtstamp_config cfg;
204 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
205 struct pci_dev *pdev;
206 u8 station[20];
207
208 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
209 return -EFAULT;
210
211
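	/* Get ieee1588's dev information */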
212 pdev = adapter->ptp_pdev;
213
214 if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
215 return -ERANGE;
216
217 switch (cfg.rx_filter) {
218 case HWTSTAMP_FILTER_NONE:
219 adapter->hwts_rx_en = 0;
220 break;
221 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
222 adapter->hwts_rx_en = 0;
223 pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0);
224 break;
225 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
226 adapter->hwts_rx_en = 1;
227 pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0);
228 break;
229 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
230 adapter->hwts_rx_en = 1;
231 pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
232 strcpy(station, PTP_L4_MULTICAST_SA);
233 pch_set_station_address(station, pdev);
234 break;
235 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
236 adapter->hwts_rx_en = 1;
237 pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
238 strcpy(station, PTP_L2_MULTICAST_SA);
239 pch_set_station_address(station, pdev);
240 break;
241 default:
242 return -ERANGE;
243 }
244
245 adapter->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;
246
247
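	/* Clear out any old time stamps. */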
248 pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED);
249
250 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
251}
252
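/**
 * pch_gbe_mac_load_mac_addr - Load MAC address
 * @hw:	Pointer to the HW structure
 */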
253static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
254{
255 iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
256}
263
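/**
 * pch_gbe_mac_read_mac_addr - Read MAC address
 * @hw:	Pointer to the HW structure
 * Returns:
 *	0:  Successful.
 */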
264static s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
265{
266 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
267 u32 adr1a, adr1b;
268
269 adr1a = ioread32(&hw->reg->mac_adr[0].high);
270 adr1b = ioread32(&hw->reg->mac_adr[0].low);
271
272 hw->mac.addr[0] = (u8)(adr1a & 0xFF);
273 hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
274 hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
275 hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
276 hw->mac.addr[4] = (u8)(adr1b & 0xFF);
277 hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
278
279 netdev_dbg(adapter->netdev, "hw->mac.addr : %pM\n", hw->mac.addr);
280 return 0;
281}
287
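/**
 * pch_gbe_wait_clr_bit - Wait to clear a bit
 * @reg:	Pointer of register
 * @bit:	Busy bit
 */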
288static void pch_gbe_wait_clr_bit(void __iomem *reg, u32 bit)
289{
290 u32 tmp;
291
292
293 if (readx_poll_timeout_atomic(ioread32, reg, tmp, !(tmp & bit), 0, 10))
294 pr_err("Error: busy bit is not cleared\n");
295}
302
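/**
 * pch_gbe_mac_mar_set - Set MAC address register
 * @hw:	    Pointer to the HW structure
 * @addr:   Pointer to the MAC address
 * @index:  MAC address array register
 */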
static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 *addr, u32 index)
304{
305 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
306 u32 mar_low, mar_high, adrmask;
307
308 netdev_dbg(adapter->netdev, "index : 0x%x\n", index);
313
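	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */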
314 mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
315 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
316 mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
317
318 adrmask = ioread32(&hw->reg->ADDR_MASK);
319 iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
320
321 pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
322
323 iowrite32(mar_high, &hw->reg->mac_adr[index].high);
324 iowrite32(mar_low, &hw->reg->mac_adr[index].low);
325
326 iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
327
328 pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
329}
334
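/**
 * pch_gbe_mac_reset_hw - Reset hardware
 * @hw:	Pointer to the HW structure
 */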
335static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
336{
337
338 pch_gbe_mac_read_mac_addr(hw);
339 iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
340 iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
341 pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
342
343 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
345}
346
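/**
 * pch_gbe_disable_mac_rx - Disable MAC receive
 * @hw:	Pointer to the HW structure
 */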
347static void pch_gbe_disable_mac_rx(struct pch_gbe_hw *hw)
348{
349 u32 rctl;
350
351 rctl = ioread32(&hw->reg->MAC_RX_EN);
352 iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
353}
354
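/**
 * pch_gbe_enable_mac_rx - Enable MAC receive
 * @hw:	Pointer to the HW structure
 */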
355static void pch_gbe_enable_mac_rx(struct pch_gbe_hw *hw)
356{
357 u32 rctl;
358
359 rctl = ioread32(&hw->reg->MAC_RX_EN);
360 iowrite32((rctl | PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
361}
367
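/**
 * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
 * @hw:	Pointer to the HW structure
 * @mar_count: Receive address registers
 */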
368static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
369{
370 u32 i;
371
372
373 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
374
375
376 for (i = 1; i < mar_count; i++) {
377 iowrite32(0, &hw->reg->mac_adr[i].high);
378 iowrite32(0, &hw->reg->mac_adr[i].low);
379 }
380 iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
381
382 pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
383}
391
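/**
 * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
 * @hw:	Pointer to the HW structure
 * Returns:
 *	0:		Successful.
 *	Negative value:	Failed.
 */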
392s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
393{
394 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
395 struct pch_gbe_mac_info *mac = &hw->mac;
396 u32 rx_fctrl;
397
398 netdev_dbg(adapter->netdev, "mac->fc = %u\n", mac->fc);
399
400 rx_fctrl = ioread32(&hw->reg->RX_FCTRL);
401
402 switch (mac->fc) {
403 case PCH_GBE_FC_NONE:
404 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
405 mac->tx_fc_enable = false;
406 break;
407 case PCH_GBE_FC_RX_PAUSE:
408 rx_fctrl |= PCH_GBE_FL_CTRL_EN;
409 mac->tx_fc_enable = false;
410 break;
411 case PCH_GBE_FC_TX_PAUSE:
412 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
413 mac->tx_fc_enable = true;
414 break;
415 case PCH_GBE_FC_FULL:
416 rx_fctrl |= PCH_GBE_FL_CTRL_EN;
417 mac->tx_fc_enable = true;
418 break;
419 default:
420 netdev_err(adapter->netdev,
421 "Flow control param set incorrectly\n");
422 return -EINVAL;
423 }
424 if (mac->link_duplex == DUPLEX_HALF)
425 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
426 iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
427 netdev_dbg(adapter->netdev,
428 "RX_FCTRL reg : 0x%08x mac->tx_fc_enable : %d\n",
429 ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
430 return 0;
431}
437
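/**
 * pch_gbe_mac_set_wol_event - Set wake-on-lan event
 * @hw:     Pointer to the HW structure
 * @wu_evt: Wake up event
 */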
438static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
439{
440 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
441 u32 addr_mask;
442
443 netdev_dbg(adapter->netdev, "wu_evt : 0x%08x ADDR_MASK reg : 0x%08x\n",
444 wu_evt, ioread32(&hw->reg->ADDR_MASK));
445
446 if (wu_evt) {
447
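		/* Set Wake-On-Lan address mask */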
448 addr_mask = ioread32(&hw->reg->ADDR_MASK);
449 iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
450
451 pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
452 iowrite32(0, &hw->reg->WOL_ST);
453 iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
454 iowrite32(0x02, &hw->reg->TCPIP_ACC);
455 iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
456 } else {
457 iowrite32(0, &hw->reg->WOL_CTRL);
458 iowrite32(0, &hw->reg->WOL_ST);
459 }
461}
472
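/**
 * pch_gbe_mac_ctrl_miim - Control MIIM interface
 * @hw:   Pointer to the HW structure
 * @addr: Address of PHY
 * @dir:  Operation (PCH_GBE_MIIM_OPER_READ or PCH_GBE_MIIM_OPER_WRITE)
 * @reg:  Access register of PHY
 * @data: Write data
 * Returns: Read data
 */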
473u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
474 u16 data)
475{
476 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
477 unsigned long flags;
478 u32 data_out;
479
480 spin_lock_irqsave(&hw->miim_lock, flags);
481
482 if (readx_poll_timeout_atomic(ioread32, &hw->reg->MIIM, data_out,
483 data_out & PCH_GBE_MIIM_OPER_READY, 20, 2000)) {
484 netdev_err(adapter->netdev, "pch-gbe.miim won't go Ready\n");
485 spin_unlock_irqrestore(&hw->miim_lock, flags);
486 return 0;
487 }
488 iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
489 (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
490 dir | data), &hw->reg->MIIM);
491 readx_poll_timeout_atomic(ioread32, &hw->reg->MIIM, data_out,
492 data_out & PCH_GBE_MIIM_OPER_READY, 20, 2000);
493 spin_unlock_irqrestore(&hw->miim_lock, flags);
494
495 netdev_dbg(adapter->netdev, "PHY %s: reg=%d, data=0x%04X\n",
496 dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
497 dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
498 return (u16) data_out;
499}
504
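/**
 * pch_gbe_mac_set_pause_packet - Set pause packet
 * @hw:   Pointer to the HW structure
 */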
505static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
506{
507 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
508 unsigned long tmp2, tmp3;
509
510
511 tmp2 = hw->mac.addr[1];
512 tmp2 = (tmp2 << 8) | hw->mac.addr[0];
513 tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);
514
515 tmp3 = hw->mac.addr[5];
516 tmp3 = (tmp3 << 8) | hw->mac.addr[4];
517 tmp3 = (tmp3 << 8) | hw->mac.addr[3];
518 tmp3 = (tmp3 << 8) | hw->mac.addr[2];
519
520 iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
521 iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
522 iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
523 iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
524 iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);
525
526
527 iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);
528
529 netdev_dbg(adapter->netdev,
530 "PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
531 ioread32(&hw->reg->PAUSE_PKT1),
532 ioread32(&hw->reg->PAUSE_PKT2),
533 ioread32(&hw->reg->PAUSE_PKT3),
534 ioread32(&hw->reg->PAUSE_PKT4),
535 ioread32(&hw->reg->PAUSE_PKT5));
536
538}
547
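/**
 * pch_gbe_alloc_queues - Allocate memory for all rings
 * @adapter:  Board private structure to initialize
 * Returns:
 *	0:	Successfully
 *	Negative value:	Failed
 */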
548static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
549{
550 adapter->tx_ring = devm_kzalloc(&adapter->pdev->dev,
551 sizeof(*adapter->tx_ring), GFP_KERNEL);
552 if (!adapter->tx_ring)
553 return -ENOMEM;
554
555 adapter->rx_ring = devm_kzalloc(&adapter->pdev->dev,
556 sizeof(*adapter->rx_ring), GFP_KERNEL);
557 if (!adapter->rx_ring)
558 return -ENOMEM;
559 return 0;
560}
565
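/**
 * pch_gbe_init_stats - Initialize status
 * @adapter:  Board private structure to initialize
 */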
566static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
567{
568 memset(&adapter->stats, 0, sizeof(adapter->stats));
570}
578
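/**
 * pch_gbe_init_phy - Initialize PHY
 * @adapter:  Board private structure to initialize
 * Returns:
 *	0:	Successfully
 *	Negative value:	Failed
 */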
579static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
580{
581 struct net_device *netdev = adapter->netdev;
582 u32 addr;
583 u16 bmcr, stat;
584
585
586 for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
587 adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
588 bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
589 stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
590 stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
591 if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
592 break;
593 }
594 adapter->hw.phy.addr = adapter->mii.phy_id;
595 netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id);
596 if (addr == PCH_GBE_PHY_REGS_LEN)
597 return -EAGAIN;
598
599 for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
600 if (addr != adapter->mii.phy_id) {
601 pch_gbe_mdio_write(netdev, addr, MII_BMCR,
602 BMCR_ISOLATE);
603 } else {
604 bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
605 pch_gbe_mdio_write(netdev, addr, MII_BMCR,
606 bmcr & ~BMCR_ISOLATE);
607 }
608 }
609
610
611 adapter->mii.phy_id_mask = 0x1F;
612 adapter->mii.reg_num_mask = 0x1F;
613 adapter->mii.dev = adapter->netdev;
614 adapter->mii.mdio_read = pch_gbe_mdio_read;
615 adapter->mii.mdio_write = pch_gbe_mdio_write;
616 adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
617 return 0;
618}
628
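/**
 * pch_gbe_mdio_read - The read function for mii
 * @netdev: Network interface device structure
 * @addr:   PHY ID
 * @reg:    Access location
 * Returns the value read from the PHY register
 */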
629static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
630{
631 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
632 struct pch_gbe_hw *hw = &adapter->hw;
633
634 return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
635 (u16) 0);
636}
644
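/**
 * pch_gbe_mdio_write - The write function for mii
 * @netdev: Network interface device structure
 * @addr:   PHY ID
 * @reg:    Access location
 * @data:   Write data
 */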
645static void pch_gbe_mdio_write(struct net_device *netdev,
646 int addr, int reg, int data)
647{
648 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
649 struct pch_gbe_hw *hw = &adapter->hw;
650
651 pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
652}
657
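/**
 * pch_gbe_reset_task - Reset processing at the time of transmission timeout
 * @work:  Pointer of board private structure
 */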
658static void pch_gbe_reset_task(struct work_struct *work)
659{
660 struct pch_gbe_adapter *adapter;
661 adapter = container_of(work, struct pch_gbe_adapter, reset_task);
662
663 rtnl_lock();
664 pch_gbe_reinit_locked(adapter);
665 rtnl_unlock();
666}
671
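/**
 * pch_gbe_reinit_locked - Re-initialization
 * @adapter:  Board private structure
 */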
672void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
673{
674 pch_gbe_down(adapter);
675 pch_gbe_up(adapter);
676}
681
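/**
 * pch_gbe_reset - Reset GbE
 * @adapter:  Board private structure
 */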
682void pch_gbe_reset(struct pch_gbe_adapter *adapter)
683{
684 struct net_device *netdev = adapter->netdev;
685 struct pch_gbe_hw *hw = &adapter->hw;
686 s32 ret_val;
687
688 pch_gbe_mac_reset_hw(hw);
689
690 pch_gbe_set_multi(netdev);
691
692 pch_gbe_mac_init_rx_addrs(hw, PCH_GBE_MAR_ENTRIES);
693
694 ret_val = pch_gbe_phy_get_id(hw);
695 if (ret_val) {
696 netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n");
697 return;
698 }
699 pch_gbe_phy_init_setting(hw);
700
701 pch_gbe_phy_set_rgmii(hw);
702}
707
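/**
 * pch_gbe_free_irq - Free an interrupt
 * @adapter:  Board private structure
 */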
708static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
709{
710 struct net_device *netdev = adapter->netdev;
711
712 free_irq(adapter->irq, netdev);
713 pci_free_irq_vectors(adapter->pdev);
714}
719
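/**
 * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter:  Board private structure
 */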
720static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
721{
722 struct pch_gbe_hw *hw = &adapter->hw;
723
724 atomic_inc(&adapter->irq_sem);
725 iowrite32(0, &hw->reg->INT_EN);
726 ioread32(&hw->reg->INT_ST);
727 synchronize_irq(adapter->irq);
728
729 netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n",
730 ioread32(&hw->reg->INT_EN));
731}
736
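/**
 * pch_gbe_irq_enable - Enable default interrupt generation settings
 * @adapter:  Board private structure
 */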
737static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
738{
739 struct pch_gbe_hw *hw = &adapter->hw;
740
741 if (likely(atomic_dec_and_test(&adapter->irq_sem)))
742 iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
743 ioread32(&hw->reg->INT_ST);
744 netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n",
745 ioread32(&hw->reg->INT_EN));
746}
747
753
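/**
 * pch_gbe_setup_tctl - Configure the Transmit control registers
 * @adapter:  Board private structure
 */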
754static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
755{
756 struct pch_gbe_hw *hw = &adapter->hw;
757 u32 tx_mode, tcpip;
758
759 tx_mode = PCH_GBE_TM_LONG_PKT |
760 PCH_GBE_TM_ST_AND_FD |
761 PCH_GBE_TM_SHORT_PKT |
762 PCH_GBE_TM_TH_TX_STRT_8 |
763 PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;
764
765 iowrite32(tx_mode, &hw->reg->TX_MODE);
766
767 tcpip = ioread32(&hw->reg->TCPIP_ACC);
768 tcpip |= PCH_GBE_TX_TCPIPACC_EN;
769 iowrite32(tcpip, &hw->reg->TCPIP_ACC);
771}
776
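/**
 * pch_gbe_configure_tx - Configure Transmit Unit after Reset
 * @adapter:  Board private structure
 */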
777static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
778{
779 struct pch_gbe_hw *hw = &adapter->hw;
780 u32 tdba, tdlen, dctrl;
781
782 netdev_dbg(adapter->netdev, "dma addr = 0x%08llx size = 0x%08x\n",
783 (unsigned long long)adapter->tx_ring->dma,
784 adapter->tx_ring->size);
785
786
787 tdba = adapter->tx_ring->dma;
788 tdlen = adapter->tx_ring->size - 0x10;
789 iowrite32(tdba, &hw->reg->TX_DSC_BASE);
790 iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
791 iowrite32(tdba, &hw->reg->TX_DSC_SW_P);
792
793
794 dctrl = ioread32(&hw->reg->DMA_CTRL);
795 dctrl |= PCH_GBE_TX_DMA_EN;
796 iowrite32(dctrl, &hw->reg->DMA_CTRL);
797}
802
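/**
 * pch_gbe_setup_rctl - Configure the receive control registers
 * @adapter:  Board private structure
 */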
803static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
804{
805 struct pch_gbe_hw *hw = &adapter->hw;
806 u32 rx_mode, tcpip;
807
808 rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
809 PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;
810
811 iowrite32(rx_mode, &hw->reg->RX_MODE);
812
813 tcpip = ioread32(&hw->reg->TCPIP_ACC);
814
815 tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
816 tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
817 iowrite32(tcpip, &hw->reg->TCPIP_ACC);
819}
824
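/**
 * pch_gbe_configure_rx - Configure Receive Unit after Reset
 * @adapter:  Board private structure
 */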
825static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
826{
827 struct pch_gbe_hw *hw = &adapter->hw;
828 u32 rdba, rdlen, rxdma;
829
830 netdev_dbg(adapter->netdev, "dma adr = 0x%08llx size = 0x%08x\n",
831 (unsigned long long)adapter->rx_ring->dma,
832 adapter->rx_ring->size);
833
834 pch_gbe_mac_force_mac_fc(hw);
835
836 pch_gbe_disable_mac_rx(hw);
837
838
839 rxdma = ioread32(&hw->reg->DMA_CTRL);
840 rxdma &= ~PCH_GBE_RX_DMA_EN;
841 iowrite32(rxdma, &hw->reg->DMA_CTRL);
842
843 netdev_dbg(adapter->netdev,
844 "MAC_RX_EN reg = 0x%08x DMA_CTRL reg = 0x%08x\n",
845 ioread32(&hw->reg->MAC_RX_EN),
846 ioread32(&hw->reg->DMA_CTRL));
849
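	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */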
850 rdba = adapter->rx_ring->dma;
851 rdlen = adapter->rx_ring->size - 0x10;
852 iowrite32(rdba, &hw->reg->RX_DSC_BASE);
853 iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
854 iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
855}
861
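/**
 * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
 * @adapter:      Board private structure
 * @buffer_info:  Buffer information structure
 */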
862static void pch_gbe_unmap_and_free_tx_resource(
863 struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
864{
865 if (buffer_info->mapped) {
866 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
867 buffer_info->length, DMA_TO_DEVICE);
868 buffer_info->mapped = false;
869 }
870 if (buffer_info->skb) {
871 dev_kfree_skb_any(buffer_info->skb);
872 buffer_info->skb = NULL;
873 }
874}
880
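/**
 * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
 * @adapter:      Board private structure
 * @buffer_info:  Buffer information structure
 */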
881static void pch_gbe_unmap_and_free_rx_resource(
882 struct pch_gbe_adapter *adapter,
883 struct pch_gbe_buffer *buffer_info)
884{
885 if (buffer_info->mapped) {
886 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
887 buffer_info->length, DMA_FROM_DEVICE);
888 buffer_info->mapped = false;
889 }
890 if (buffer_info->skb) {
891 dev_kfree_skb_any(buffer_info->skb);
892 buffer_info->skb = NULL;
893 }
894}
900
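/**
 * pch_gbe_clean_tx_ring - Free Tx Buffers
 * @adapter:  Board private structure
 * @tx_ring:  Ring to be cleaned
 */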
901static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
902 struct pch_gbe_tx_ring *tx_ring)
903{
904 struct pch_gbe_hw *hw = &adapter->hw;
905 struct pch_gbe_buffer *buffer_info;
906 unsigned long size;
907 unsigned int i;
908
909
910 for (i = 0; i < tx_ring->count; i++) {
911 buffer_info = &tx_ring->buffer_info[i];
912 pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
913 }
914 netdev_dbg(adapter->netdev,
915 "call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);
916
917 size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
918 memset(tx_ring->buffer_info, 0, size);
919
920
921 memset(tx_ring->desc, 0, tx_ring->size);
922 tx_ring->next_to_use = 0;
923 tx_ring->next_to_clean = 0;
924 iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
925 iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
926}
932
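/**
 * pch_gbe_clean_rx_ring - Free Rx Buffers
 * @adapter:  Board private structure
 * @rx_ring:  Ring to free buffers from
 */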
933static void
934pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
935 struct pch_gbe_rx_ring *rx_ring)
936{
937 struct pch_gbe_hw *hw = &adapter->hw;
938 struct pch_gbe_buffer *buffer_info;
939 unsigned long size;
940 unsigned int i;
941
942
943 for (i = 0; i < rx_ring->count; i++) {
944 buffer_info = &rx_ring->buffer_info[i];
945 pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
946 }
947 netdev_dbg(adapter->netdev,
948 "call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
949 size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
950 memset(rx_ring->buffer_info, 0, size);
951
952
953 memset(rx_ring->desc, 0, rx_ring->size);
954 rx_ring->next_to_clean = 0;
955 rx_ring->next_to_use = 0;
956 iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
957 iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
958}
959
960static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
961 u16 duplex)
962{
963 struct pch_gbe_hw *hw = &adapter->hw;
964 unsigned long rgmii = 0;
965
966
967 switch (speed) {
968 case SPEED_10:
969 rgmii = (PCH_GBE_RGMII_RATE_2_5M |
970 PCH_GBE_MAC_RGMII_CTRL_SETTING);
971 break;
972 case SPEED_100:
973 rgmii = (PCH_GBE_RGMII_RATE_25M |
974 PCH_GBE_MAC_RGMII_CTRL_SETTING);
975 break;
976 case SPEED_1000:
977 rgmii = (PCH_GBE_RGMII_RATE_125M |
978 PCH_GBE_MAC_RGMII_CTRL_SETTING);
979 break;
980 }
981 iowrite32(rgmii, &hw->reg->RGMII_CTRL);
982}

static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
984 u16 duplex)
985{
986 struct net_device *netdev = adapter->netdev;
987 struct pch_gbe_hw *hw = &adapter->hw;
988 unsigned long mode = 0;
989
990
991 switch (speed) {
992 case SPEED_10:
993 mode = PCH_GBE_MODE_MII_ETHER;
994 netdev->tx_queue_len = 10;
995 break;
996 case SPEED_100:
997 mode = PCH_GBE_MODE_MII_ETHER;
998 netdev->tx_queue_len = 100;
999 break;
1000 case SPEED_1000:
1001 mode = PCH_GBE_MODE_GMII_ETHER;
1002 break;
1003 }
1004 if (duplex == DUPLEX_FULL)
1005 mode |= PCH_GBE_MODE_FULL_DUPLEX;
1006 else
1007 mode |= PCH_GBE_MODE_HALF_DUPLEX;
1008 iowrite32(mode, &hw->reg->MODE);
1009}
1014
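/**
 * pch_gbe_watchdog - Watchdog process
 * @t:  timer list containing a Board private structure
 */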
1015static void pch_gbe_watchdog(struct timer_list *t)
1016{
1017 struct pch_gbe_adapter *adapter = from_timer(adapter, t,
1018 watchdog_timer);
1019 struct net_device *netdev = adapter->netdev;
1020 struct pch_gbe_hw *hw = &adapter->hw;
1021
1022 netdev_dbg(netdev, "right now = %ld\n", jiffies);
1023
1024 pch_gbe_update_stats(adapter);
1025 if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
1026 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
1027 netdev->tx_queue_len = adapter->tx_queue_len;
1028
1029 mii_ethtool_gset(&adapter->mii, &cmd);
1030 hw->mac.link_speed = ethtool_cmd_speed(&cmd);
1031 hw->mac.link_duplex = cmd.duplex;
1032
1033 pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
1034 hw->mac.link_duplex);
1035
1036 pch_gbe_set_mode(adapter, hw->mac.link_speed,
1037 hw->mac.link_duplex);
1038 netdev_dbg(netdev,
1039 "Link is Up %d Mbps %s-Duplex\n",
1040 hw->mac.link_speed,
1041 cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
1042 netif_carrier_on(netdev);
1043 netif_wake_queue(netdev);
1044 } else if ((!mii_link_ok(&adapter->mii)) &&
1045 (netif_carrier_ok(netdev))) {
1046 netdev_dbg(netdev, "NIC Link is Down\n");
1047 hw->mac.link_speed = SPEED_10;
1048 hw->mac.link_duplex = DUPLEX_HALF;
1049 netif_carrier_off(netdev);
1050 netif_stop_queue(netdev);
1051 }
1052 mod_timer(&adapter->watchdog_timer,
1053 round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
1054}
1061
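/**
 * pch_gbe_tx_queue - Carry out queuing of the transmission data
 * @adapter:  Board private structure
 * @tx_ring:  Tx descriptor ring structure
 * @skb:      Socket buffer structure
 */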
1062static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
1063 struct pch_gbe_tx_ring *tx_ring,
1064 struct sk_buff *skb)
1065{
1066 struct pch_gbe_hw *hw = &adapter->hw;
1067 struct pch_gbe_tx_desc *tx_desc;
1068 struct pch_gbe_buffer *buffer_info;
1069 struct sk_buff *tmp_skb;
1070 unsigned int frame_ctrl;
1071 unsigned int ring_num;
1072
1073
1074 frame_ctrl = 0;
1075 if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
1076 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
1077 if (skb->ip_summed == CHECKSUM_NONE)
1078 frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
1084
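	/* Perform checksum processing in software: the hardware checksum
	 * accelerator cannot handle frames shorter than 64 bytes.
	 */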
1085 if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
1086 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
1087 PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
1088 if (skb->protocol == htons(ETH_P_IP)) {
1089 struct iphdr *iph = ip_hdr(skb);
1090 unsigned int offset;
1091 offset = skb_transport_offset(skb);
1092 if (iph->protocol == IPPROTO_TCP) {
1093 skb->csum = 0;
1094 tcp_hdr(skb)->check = 0;
1095 skb->csum = skb_checksum(skb, offset,
1096 skb->len - offset, 0);
1097 tcp_hdr(skb)->check =
1098 csum_tcpudp_magic(iph->saddr,
1099 iph->daddr,
1100 skb->len - offset,
1101 IPPROTO_TCP,
1102 skb->csum);
1103 } else if (iph->protocol == IPPROTO_UDP) {
1104 skb->csum = 0;
1105 udp_hdr(skb)->check = 0;
1106 skb->csum =
1107 skb_checksum(skb, offset,
1108 skb->len - offset, 0);
1109 udp_hdr(skb)->check =
1110 csum_tcpudp_magic(iph->saddr,
1111 iph->daddr,
1112 skb->len - offset,
1113 IPPROTO_UDP,
1114 skb->csum);
1115 }
1116 }
1117 }
1118
1119 ring_num = tx_ring->next_to_use;
1120 if (unlikely((ring_num + 1) == tx_ring->count))
1121 tx_ring->next_to_use = 0;
1122 else
1123 tx_ring->next_to_use = ring_num + 1;
1124
1125
1126 buffer_info = &tx_ring->buffer_info[ring_num];
1127 tmp_skb = buffer_info->skb;
1128
1129
1130 memcpy(tmp_skb->data, skb->data, ETH_HLEN);
1131 tmp_skb->data[ETH_HLEN] = 0x00;
1132 tmp_skb->data[ETH_HLEN + 1] = 0x00;
1133 tmp_skb->len = skb->len;
1134 memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
1135 (skb->len - ETH_HLEN));
1136
1137 buffer_info->length = tmp_skb->len;
1138 buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
1139 buffer_info->length,
1140 DMA_TO_DEVICE);
1141 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1142 netdev_err(adapter->netdev, "TX DMA map failed\n");
1143 buffer_info->dma = 0;
1144 buffer_info->time_stamp = 0;
1145 tx_ring->next_to_use = ring_num;
1146 return;
1147 }
1148 buffer_info->mapped = true;
1149 buffer_info->time_stamp = jiffies;
1150
1151
1152 tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
	tx_desc->buffer_addr = buffer_info->dma;
	tx_desc->length = tmp_skb->len;
	tx_desc->tx_words_eob = tmp_skb->len + 3;
	tx_desc->tx_frame_ctrl = frame_ctrl;
	tx_desc->gbec_status = DSC_INIT16;
1158
1159 if (unlikely(++ring_num == tx_ring->count))
1160 ring_num = 0;
1161
1162
1163 iowrite32(tx_ring->dma +
1164 (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
1165 &hw->reg->TX_DSC_SW_P);
1166
1167 pch_tx_timestamp(adapter, skb);
1168
1169 dev_kfree_skb_any(skb);
1170}
1175
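/**
 * pch_gbe_update_stats - Update the board statistics counters
 * @adapter:  Board private structure
 */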
1176void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
1177{
1178 struct net_device *netdev = adapter->netdev;
1179 struct pci_dev *pdev = adapter->pdev;
1180 struct pch_gbe_hw_stats *stats = &adapter->stats;
1181 unsigned long flags;
1186
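	/* Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */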
1187 if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
1188 return;
1189
1190 spin_lock_irqsave(&adapter->stats_lock, flags);
1191
1192
1193 stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
1194 stats->tx_errors = stats->tx_length_errors +
1195 stats->tx_aborted_errors +
1196 stats->tx_carrier_errors + stats->tx_timeout_count;
1197
1198
1199 netdev->stats.rx_packets = stats->rx_packets;
1200 netdev->stats.rx_bytes = stats->rx_bytes;
1201 netdev->stats.rx_dropped = stats->rx_dropped;
1202 netdev->stats.tx_packets = stats->tx_packets;
1203 netdev->stats.tx_bytes = stats->tx_bytes;
1204 netdev->stats.tx_dropped = stats->tx_dropped;
1205
1206 netdev->stats.multicast = stats->multicast;
1207 netdev->stats.collisions = stats->collisions;
1208
1209 netdev->stats.rx_errors = stats->rx_errors;
1210 netdev->stats.rx_crc_errors = stats->rx_crc_errors;
1211 netdev->stats.rx_frame_errors = stats->rx_frame_errors;
1212
1213 netdev->stats.tx_errors = stats->tx_errors;
1214 netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
1215 netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;
1216
1217 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1218}
1219
1220static void pch_gbe_disable_dma_rx(struct pch_gbe_hw *hw)
1221{
1222 u32 rxdma;
1223
1224
1225 rxdma = ioread32(&hw->reg->DMA_CTRL);
1226 rxdma &= ~PCH_GBE_RX_DMA_EN;
1227 iowrite32(rxdma, &hw->reg->DMA_CTRL);
1228}
1229
1230static void pch_gbe_enable_dma_rx(struct pch_gbe_hw *hw)
1231{
1232 u32 rxdma;
1233
1234
1235 rxdma = ioread32(&hw->reg->DMA_CTRL);
1236 rxdma |= PCH_GBE_RX_DMA_EN;
1237 iowrite32(rxdma, &hw->reg->DMA_CTRL);
1238}
1247
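/**
 * pch_gbe_intr - Interrupt Handler
 * @irq:   Interrupt number
 * @data:  Pointer to a network interface device structure
 * Returns:
 *	- IRQ_HANDLED:	Our interrupt
 *	- IRQ_NONE:	Not our interrupt
 */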
1248static irqreturn_t pch_gbe_intr(int irq, void *data)
1249{
1250 struct net_device *netdev = data;
1251 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1252 struct pch_gbe_hw *hw = &adapter->hw;
1253 u32 int_st;
1254 u32 int_en;
1255
1256
1257 int_st = ioread32(&hw->reg->INT_ST);
1258 int_st = int_st & ioread32(&hw->reg->INT_EN);
1259
1260 if (unlikely(!int_st))
1261 return IRQ_NONE;
1262 netdev_dbg(netdev, "%s occur int_st = 0x%08x\n", __func__, int_st);
1263 if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
1264 adapter->stats.intr_rx_frame_err_count++;
1265 if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
1266 if (!adapter->rx_stop_flag) {
1267 adapter->stats.intr_rx_fifo_err_count++;
1268 netdev_dbg(netdev, "Rx fifo over run\n");
1269 adapter->rx_stop_flag = true;
1270 int_en = ioread32(&hw->reg->INT_EN);
1271 iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
1272 &hw->reg->INT_EN);
1273 pch_gbe_disable_dma_rx(&adapter->hw);
1274 int_st |= ioread32(&hw->reg->INT_ST);
1275 int_st = int_st & ioread32(&hw->reg->INT_EN);
1276 }
1277 if (int_st & PCH_GBE_INT_RX_DMA_ERR)
1278 adapter->stats.intr_rx_dma_err_count++;
1279 if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
1280 adapter->stats.intr_tx_fifo_err_count++;
1281 if (int_st & PCH_GBE_INT_TX_DMA_ERR)
1282 adapter->stats.intr_tx_dma_err_count++;
1283 if (int_st & PCH_GBE_INT_TCPIP_ERR)
1284 adapter->stats.intr_tcpip_err_count++;
1285
1286 if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
1287 adapter->stats.intr_rx_dsc_empty_count++;
1288 netdev_dbg(netdev, "Rx descriptor is empty\n");
1289 int_en = ioread32(&hw->reg->INT_EN);
1290 iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
1291 if (hw->mac.tx_fc_enable) {
1292
1293 pch_gbe_mac_set_pause_packet(hw);
1294 }
1295 }
1296
1297
1298 if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
1299 (adapter->rx_stop_flag)) {
1300 if (likely(napi_schedule_prep(&adapter->napi))) {
1301
1302 atomic_inc(&adapter->irq_sem);
1303 int_en = ioread32(&hw->reg->INT_EN);
1304 int_en &=
1305 ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
1306 iowrite32(int_en, &hw->reg->INT_EN);
1307
1308 __napi_schedule(&adapter->napi);
1309 }
1310 }
1311 netdev_dbg(netdev, "return = 0x%08x INT_EN reg = 0x%08x\n",
1312 IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
1313 return IRQ_HANDLED;
1314}
1321
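/**
 * pch_gbe_alloc_rx_buffers - Replace used receive buffers
 * @adapter:       Board private structure
 * @rx_ring:       Rx descriptor ring
 * @cleaned_count: Cleaned count
 */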
1322static void
1323pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
1324 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1325{
1326 struct net_device *netdev = adapter->netdev;
1327 struct pci_dev *pdev = adapter->pdev;
1328 struct pch_gbe_hw *hw = &adapter->hw;
1329 struct pch_gbe_rx_desc *rx_desc;
1330 struct pch_gbe_buffer *buffer_info;
1331 struct sk_buff *skb;
1332 unsigned int i;
1333 unsigned int bufsz;
1334
1335 bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
1336 i = rx_ring->next_to_use;
1337
1338 while ((cleaned_count--)) {
1339 buffer_info = &rx_ring->buffer_info[i];
1340 skb = netdev_alloc_skb(netdev, bufsz);
1341 if (unlikely(!skb)) {
1342
1343 adapter->stats.rx_alloc_buff_failed++;
1344 break;
1345 }
1346
1347 skb_reserve(skb, NET_IP_ALIGN);
1348 buffer_info->skb = skb;
1349
1350 buffer_info->dma = dma_map_single(&pdev->dev,
1351 buffer_info->rx_buffer,
1352 buffer_info->length,
1353 DMA_FROM_DEVICE);
1354 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1355 dev_kfree_skb(skb);
1356 buffer_info->skb = NULL;
1357 buffer_info->dma = 0;
1358 adapter->stats.rx_alloc_buff_failed++;
1359 break;
1360 }
1361 buffer_info->mapped = true;
1362 rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1363 rx_desc->buffer_addr = (buffer_info->dma);
1364 rx_desc->gbec_status = DSC_INIT16;
1365
1366 netdev_dbg(netdev,
1367 "i = %d buffer_info->dma = 0x08%llx buffer_info->length = 0x%x\n",
1368 i, (unsigned long long)buffer_info->dma,
1369 buffer_info->length);
1370
1371 if (unlikely(++i == rx_ring->count))
1372 i = 0;
1373 }
1374 if (likely(rx_ring->next_to_use != i)) {
1375 rx_ring->next_to_use = i;
1376 if (unlikely(i-- == 0))
1377 i = (rx_ring->count - 1);
1378 iowrite32(rx_ring->dma +
1379 (int)sizeof(struct pch_gbe_rx_desc) * i,
1380 &hw->reg->RX_DSC_SW_P);
1381 }
1383}
1384
1385static int
1386pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
1387 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1388{
1389 struct pci_dev *pdev = adapter->pdev;
1390 struct pch_gbe_buffer *buffer_info;
1391 unsigned int i;
1392 unsigned int bufsz;
1393 unsigned int size;
1394
1395 bufsz = adapter->rx_buffer_len;
1396
1397 size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
1398 rx_ring->rx_buff_pool =
1399 dma_alloc_coherent(&pdev->dev, size,
1400 &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
1401 if (!rx_ring->rx_buff_pool)
1402 return -ENOMEM;
1403
1404 rx_ring->rx_buff_pool_size = size;
1405 for (i = 0; i < rx_ring->count; i++) {
1406 buffer_info = &rx_ring->buffer_info[i];
1407 buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
1408 buffer_info->length = bufsz;
1409 }
1410 return 0;
1411}
1417
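/**
 * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
 * @adapter:   Board private structure
 * @tx_ring:   Tx descriptor ring
 */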
1418static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
1419 struct pch_gbe_tx_ring *tx_ring)
1420{
1421 struct pch_gbe_buffer *buffer_info;
1422 struct sk_buff *skb;
1423 unsigned int i;
1424 unsigned int bufsz;
1425 struct pch_gbe_tx_desc *tx_desc;
1426
1427 bufsz =
1428 adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
1429
1430 for (i = 0; i < tx_ring->count; i++) {
1431 buffer_info = &tx_ring->buffer_info[i];
1432 skb = netdev_alloc_skb(adapter->netdev, bufsz);
1433 skb_reserve(skb, PCH_GBE_DMA_ALIGN);
1434 buffer_info->skb = skb;
1435 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1436 tx_desc->gbec_status = (DSC_INIT16);
1437 }
1439}
1448
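/**
 * pch_gbe_clean_tx - Reclaim resources after transmit completes
 * @adapter:   Board private structure
 * @tx_ring:   Tx descriptor ring
 * Returns:
 *	true:  Cleaned the descriptor
 *	false: Not cleaned the descriptor
 */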
1449static bool
1450pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1451 struct pch_gbe_tx_ring *tx_ring)
1452{
1453 struct pch_gbe_tx_desc *tx_desc;
1454 struct pch_gbe_buffer *buffer_info;
1455 struct sk_buff *skb;
1456 unsigned int i;
1457 unsigned int cleaned_count = 0;
1458 bool cleaned = false;
1459 int unused, thresh;
1460
1461 netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
1462 tx_ring->next_to_clean);
1463
1464 i = tx_ring->next_to_clean;
1465 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1466 netdev_dbg(adapter->netdev, "gbec_status:0x%04x dma_status:0x%04x\n",
1467 tx_desc->gbec_status, tx_desc->dma_status);
1468
1469 unused = PCH_GBE_DESC_UNUSED(tx_ring);
1470 thresh = tx_ring->count - NAPI_POLL_WEIGHT;
	if ((tx_desc->gbec_status == DSC_INIT16) && (unused < thresh)) {
		int j, k;

		if (unused < 8) {
1475 netdev_dbg(adapter->netdev,
1476 "clean_tx: transmit queue warning (%x,%x) unused=%d\n",
1477 tx_ring->next_to_clean, tx_ring->next_to_use,
1478 unused);
1479 }
1480
1481
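		/* current marked clean, scan for more that need cleaning */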
1482 k = i;
		for (j = 0; j < NAPI_POLL_WEIGHT; j++) {
			tx_desc = PCH_GBE_TX_DESC(*tx_ring, k);
			if (tx_desc->gbec_status != DSC_INIT16)
				break;
			if (++k >= tx_ring->count)
				k = 0;
		}
1489 if (j < NAPI_POLL_WEIGHT) {
1490 netdev_dbg(adapter->netdev,
1491 "clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n",
1492 unused, j, i, k, tx_ring->next_to_use,
1493 tx_desc->gbec_status);
1494 i = k;
1495 }
1496 }
1497
1498 while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
1499 netdev_dbg(adapter->netdev, "gbec_status:0x%04x\n",
1500 tx_desc->gbec_status);
1501 buffer_info = &tx_ring->buffer_info[i];
1502 skb = buffer_info->skb;
1503 cleaned = true;
1504
1505 if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
1506 adapter->stats.tx_aborted_errors++;
1507 netdev_err(adapter->netdev, "Transfer Abort Error\n");
1508 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
1509 ) {
1510 adapter->stats.tx_carrier_errors++;
1511 netdev_err(adapter->netdev,
1512 "Transfer Carrier Sense Error\n");
1513 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
1514 ) {
1515 adapter->stats.tx_aborted_errors++;
1516 netdev_err(adapter->netdev,
1517 "Transfer Collision Abort Error\n");
1518 } else if ((tx_desc->gbec_status &
1519 (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
1520 PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
1521 adapter->stats.collisions++;
1522 adapter->stats.tx_packets++;
1523 adapter->stats.tx_bytes += skb->len;
1524 netdev_dbg(adapter->netdev, "Transfer Collision\n");
1525 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
1526 ) {
1527 adapter->stats.tx_packets++;
1528 adapter->stats.tx_bytes += skb->len;
1529 }
1530 if (buffer_info->mapped) {
1531 netdev_dbg(adapter->netdev,
1532 "unmap buffer_info->dma : %d\n", i);
1533 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1534 buffer_info->length, DMA_TO_DEVICE);
1535 buffer_info->mapped = false;
1536 }
1537 if (buffer_info->skb) {
1538 netdev_dbg(adapter->netdev,
1539 "trim buffer_info->skb : %d\n", i);
1540 skb_trim(buffer_info->skb, 0);
1541 }
1542 tx_desc->gbec_status = DSC_INIT16;
1543 if (unlikely(++i == tx_ring->count))
1544 i = 0;
1545 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1546
1547
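		/* weight of a sort for tx, to avoid endless transmit cleanup */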
1548 if (cleaned_count++ == NAPI_POLL_WEIGHT) {
1549 cleaned = false;
1550 break;
1551 }
1552 }
1553 netdev_dbg(adapter->netdev,
1554 "called pch_gbe_unmap_and_free_tx_resource() %d count\n",
1555 cleaned_count);
1556 if (cleaned_count > 0) {
1557
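		/* Recover from running out of Tx resources in xmit_frame */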
1558 netif_tx_lock(adapter->netdev);
1559 if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev))))
1560 {
1561 netif_wake_queue(adapter->netdev);
1562 adapter->stats.tx_restart_count++;
1563 netdev_dbg(adapter->netdev, "Tx wake queue\n");
1564 }
1565
1566 tx_ring->next_to_clean = i;
1567
1568 netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
1569 tx_ring->next_to_clean);
1570 netif_tx_unlock(adapter->netdev);
1571 }
1572 return cleaned;
1573}
1584
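/**
 * pch_gbe_clean_rx - Send received data up the network stack
 * @adapter:     Board private structure
 * @rx_ring:     Rx descriptor ring
 * @work_done:   Completed count
 * @work_to_do:  Request count
 * Returns:
 *	true:  Cleaned the descriptor
 *	false: Not cleaned the descriptor
 */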
1585static bool
1586pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1587 struct pch_gbe_rx_ring *rx_ring,
1588 int *work_done, int work_to_do)
1589{
1590 struct net_device *netdev = adapter->netdev;
1591 struct pci_dev *pdev = adapter->pdev;
1592 struct pch_gbe_buffer *buffer_info;
1593 struct pch_gbe_rx_desc *rx_desc;
1594 u32 length;
1595 unsigned int i;
1596 unsigned int cleaned_count = 0;
1597 bool cleaned = false;
1598 struct sk_buff *skb;
1599 u8 dma_status;
1600 u16 gbec_status;
1601 u32 tcp_ip_status;
1602
1603 i = rx_ring->next_to_clean;
1604
1605 while (*work_done < work_to_do) {
1606
1607 rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1608 if (rx_desc->gbec_status == DSC_INIT16)
1609 break;
1610 cleaned = true;
1611 cleaned_count++;
1612
1613 dma_status = rx_desc->dma_status;
1614 gbec_status = rx_desc->gbec_status;
1615 tcp_ip_status = rx_desc->tcp_ip_status;
1616 rx_desc->gbec_status = DSC_INIT16;
1617 buffer_info = &rx_ring->buffer_info[i];
1618 skb = buffer_info->skb;
1619 buffer_info->skb = NULL;
1620
1621
1622 dma_unmap_single(&pdev->dev, buffer_info->dma,
1623 buffer_info->length, DMA_FROM_DEVICE);
1624 buffer_info->mapped = false;
1625
1626 netdev_dbg(netdev,
1627 "RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x TCP:0x%08x] BufInf = 0x%p\n",
1628 i, dma_status, gbec_status, tcp_ip_status,
1629 buffer_info);
1630
1631 if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
1632 adapter->stats.rx_frame_errors++;
1633 netdev_err(netdev, "Receive Not Octal Error\n");
1634 } else if (unlikely(gbec_status &
1635 PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
1636 adapter->stats.rx_frame_errors++;
1637 netdev_err(netdev, "Receive Nibble Error\n");
1638 } else if (unlikely(gbec_status &
1639 PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
1640 adapter->stats.rx_crc_errors++;
1641 netdev_err(netdev, "Receive CRC Error\n");
1642 } else {
1643
1644
1645 length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
1646 if (rx_desc->rx_words_eob & 0x02)
1647 length = length - 4;
1648
1649
1650
1651
1652 memcpy(skb->data, buffer_info->rx_buffer, length);
1653
1654
1655 adapter->stats.rx_bytes += length;
1656 adapter->stats.rx_packets++;
1657 if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
1658 adapter->stats.multicast++;
1659
1660 skb_put(skb, length);
1661
1662 pch_rx_timestamp(adapter, skb);
1663
1664 skb->protocol = eth_type_trans(skb, netdev);
1665 if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
1666 skb->ip_summed = CHECKSUM_UNNECESSARY;
1667 else
1668 skb->ip_summed = CHECKSUM_NONE;
1669
1670 napi_gro_receive(&adapter->napi, skb);
1671 (*work_done)++;
1672 netdev_dbg(netdev,
1673 "Receive skb->ip_summed: %d length: %d\n",
1674 skb->ip_summed, length);
1675 }
1676
1677 if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
1678 pch_gbe_alloc_rx_buffers(adapter, rx_ring,
1679 cleaned_count);
1680 cleaned_count = 0;
1681 }
1682 if (++i == rx_ring->count)
1683 i = 0;
1684 }
1685 rx_ring->next_to_clean = i;
1686 if (cleaned_count)
1687 pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
1688 return cleaned;
1689}
1698
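/**
 * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
 * @adapter:  Board private structure
 * @tx_ring:  Tx descriptor ring (for a specific queue) to setup
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */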
1699int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1700 struct pch_gbe_tx_ring *tx_ring)
1701{
1702 struct pci_dev *pdev = adapter->pdev;
1703 struct pch_gbe_tx_desc *tx_desc;
1704 int size;
1705 int desNo;
1706
1707 size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
1708 tx_ring->buffer_info = vzalloc(size);
1709 if (!tx_ring->buffer_info)
1710 return -ENOMEM;
1711
1712 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1713
1714 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1715 &tx_ring->dma, GFP_KERNEL);
1716 if (!tx_ring->desc) {
1717 vfree(tx_ring->buffer_info);
1718 return -ENOMEM;
1719 }
1720
1721 tx_ring->next_to_use = 0;
1722 tx_ring->next_to_clean = 0;
1723
1724 for (desNo = 0; desNo < tx_ring->count; desNo++) {
1725 tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
1726 tx_desc->gbec_status = DSC_INIT16;
1727 }
1728 netdev_dbg(adapter->netdev,
1729 "tx_ring->desc = 0x%p tx_ring->dma = 0x%08llx next_to_clean = 0x%08x next_to_use = 0x%08x\n",
1730 tx_ring->desc, (unsigned long long)tx_ring->dma,
1731 tx_ring->next_to_clean, tx_ring->next_to_use);
1732 return 0;
1733}
1742
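/**
 * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
 * @adapter:  Board private structure
 * @rx_ring:  Rx descriptor ring (for a specific queue) to setup
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */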
1743int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1744 struct pch_gbe_rx_ring *rx_ring)
1745{
1746 struct pci_dev *pdev = adapter->pdev;
1747 struct pch_gbe_rx_desc *rx_desc;
1748 int size;
1749 int desNo;
1750
1751 size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1752 rx_ring->buffer_info = vzalloc(size);
1753 if (!rx_ring->buffer_info)
1754 return -ENOMEM;
1755
1756 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1757 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1758 &rx_ring->dma, GFP_KERNEL);
1759 if (!rx_ring->desc) {
1760 vfree(rx_ring->buffer_info);
1761 return -ENOMEM;
1762 }
1763 rx_ring->next_to_clean = 0;
1764 rx_ring->next_to_use = 0;
1765 for (desNo = 0; desNo < rx_ring->count; desNo++) {
1766 rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
1767 rx_desc->gbec_status = DSC_INIT16;
1768 }
1769 netdev_dbg(adapter->netdev,
1770 "rx_ring->desc = 0x%p rx_ring->dma = 0x%08llx next_to_clean = 0x%08x next_to_use = 0x%08x\n",
1771 rx_ring->desc, (unsigned long long)rx_ring->dma,
1772 rx_ring->next_to_clean, rx_ring->next_to_use);
1773 return 0;
1774}
1780
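/**
 * pch_gbe_free_tx_resources - Free Tx Resources
 * @adapter:  Board private structure
 * @tx_ring:  Tx descriptor ring for a specific queue
 */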
1781void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
1782 struct pch_gbe_tx_ring *tx_ring)
1783{
1784 struct pci_dev *pdev = adapter->pdev;
1785
1786 pch_gbe_clean_tx_ring(adapter, tx_ring);
1787 vfree(tx_ring->buffer_info);
1788 tx_ring->buffer_info = NULL;
1789 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1790 tx_ring->dma);
1791 tx_ring->desc = NULL;
1792}
1798
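/**
 * pch_gbe_free_rx_resources - Free Rx Resources
 * @adapter:  Board private structure
 * @rx_ring:  Rx descriptor ring for a specific queue
 */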
1799void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
1800 struct pch_gbe_rx_ring *rx_ring)
1801{
1802 struct pci_dev *pdev = adapter->pdev;
1803
1804 pch_gbe_clean_rx_ring(adapter, rx_ring);
1805 vfree(rx_ring->buffer_info);
1806 rx_ring->buffer_info = NULL;
1807 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
1808 rx_ring->dma);
1809 rx_ring->desc = NULL;
1810}
1818
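/**
 * pch_gbe_request_irq - Allocate an interrupt line
 * @adapter:  Board private structure
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */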
1819static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
1820{
1821 struct net_device *netdev = adapter->netdev;
1822 int err;
1823
1824 err = pci_alloc_irq_vectors(adapter->pdev, 1, 1, PCI_IRQ_ALL_TYPES);
1825 if (err < 0)
1826 return err;
1827
1828 adapter->irq = pci_irq_vector(adapter->pdev, 0);
1829
1830 err = request_irq(adapter->irq, &pch_gbe_intr, IRQF_SHARED,
1831 netdev->name, netdev);
1832 if (err)
1833 netdev_err(netdev, "Unable to allocate interrupt Error: %d\n",
1834 err);
1835 netdev_dbg(netdev, "have_msi : %d return : 0x%04x\n",
1836 pci_dev_msi_enabled(adapter->pdev), err);
1837 return err;
1838}
1846
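/**
 * pch_gbe_up - Up GbE network device
 * @adapter:  Board private structure
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */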
1847int pch_gbe_up(struct pch_gbe_adapter *adapter)
1848{
1849 struct net_device *netdev = adapter->netdev;
1850 struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
1851 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
1852 int err = -EINVAL;
1853
1854
1855 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
1856 netdev_err(netdev, "Error: Invalid MAC address\n");
1857 goto out;
1858 }
1859
1860
1861 pch_gbe_set_multi(netdev);
1862
1863 pch_gbe_setup_tctl(adapter);
1864 pch_gbe_configure_tx(adapter);
1865 pch_gbe_setup_rctl(adapter);
1866 pch_gbe_configure_rx(adapter);
1867
1868 err = pch_gbe_request_irq(adapter);
1869 if (err) {
1870 netdev_err(netdev,
1871 "Error: can't bring device up - irq request failed\n");
1872 goto out;
1873 }
1874 err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
1875 if (err) {
1876 netdev_err(netdev,
1877 "Error: can't bring device up - alloc rx buffers pool failed\n");
1878 goto freeirq;
1879 }
1880 pch_gbe_alloc_tx_buffers(adapter, tx_ring);
1881 pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
1882 adapter->tx_queue_len = netdev->tx_queue_len;
1883 pch_gbe_enable_dma_rx(&adapter->hw);
1884 pch_gbe_enable_mac_rx(&adapter->hw);
1885
1886 mod_timer(&adapter->watchdog_timer, jiffies);
1887
1888 napi_enable(&adapter->napi);
1889 pch_gbe_irq_enable(adapter);
1890 netif_start_queue(adapter->netdev);
1891
1892 return 0;
1893
1894freeirq:
1895 pch_gbe_free_irq(adapter);
1896out:
1897 return err;
1898}
1903
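/**
 * pch_gbe_down - Down GbE network device
 * @adapter:  Board private structure
 */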
1904void pch_gbe_down(struct pch_gbe_adapter *adapter)
1905{
1906 struct net_device *netdev = adapter->netdev;
1907 struct pci_dev *pdev = adapter->pdev;
1908 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
1911
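	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */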
1912 napi_disable(&adapter->napi);
1913 atomic_set(&adapter->irq_sem, 0);
1914
1915 pch_gbe_irq_disable(adapter);
1916 pch_gbe_free_irq(adapter);
1917
1918 del_timer_sync(&adapter->watchdog_timer);
1919
1920 netdev->tx_queue_len = adapter->tx_queue_len;
1921 netif_carrier_off(netdev);
1922 netif_stop_queue(netdev);
1923
1924 if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
1925 pch_gbe_reset(adapter);
1926 pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
1927 pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
1928
1929 dma_free_coherent(&adapter->pdev->dev, rx_ring->rx_buff_pool_size,
1930 rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
1931 rx_ring->rx_buff_pool_logic = 0;
1932 rx_ring->rx_buff_pool_size = 0;
1933 rx_ring->rx_buff_pool = NULL;
1934}
1935
1942
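/**
 * pch_gbe_sw_init - Initialize general software structures
 * @adapter:  Board private structure to initialize
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */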
1943static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
1944{
1945 struct pch_gbe_hw *hw = &adapter->hw;
1946 struct net_device *netdev = adapter->netdev;
1947
1948 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
1949 hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1950 hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1951 hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US;
1952
1953 if (pch_gbe_alloc_queues(adapter)) {
1954 netdev_err(netdev, "Unable to allocate memory for queues\n");
1955 return -ENOMEM;
1956 }
1957 spin_lock_init(&adapter->hw.miim_lock);
1958 spin_lock_init(&adapter->stats_lock);
1959 spin_lock_init(&adapter->ethtool_lock);
1960 atomic_set(&adapter->irq_sem, 0);
1961 pch_gbe_irq_disable(adapter);
1962
1963 pch_gbe_init_stats(adapter);
1964
1965 netdev_dbg(netdev,
1966 "rx_buffer_len : %d mac.min_frame_size : %d mac.max_frame_size : %d\n",
1967 (u32) adapter->rx_buffer_len,
1968 hw->mac.min_frame_size, hw->mac.max_frame_size);
1969 return 0;
1970}
1978
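/**
 * pch_gbe_open - Called when a network interface is made active
 * @netdev:	Network interface device structure
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */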
1979static int pch_gbe_open(struct net_device *netdev)
1980{
1981 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1982 struct pch_gbe_hw *hw = &adapter->hw;
1983 int err;
1984
1985
1986 err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
1987 if (err)
1988 goto err_setup_tx;
1989
1990 err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
1991 if (err)
1992 goto err_setup_rx;
1993 pch_gbe_phy_power_up(hw);
1994 err = pch_gbe_up(adapter);
1995 if (err)
1996 goto err_up;
1997 netdev_dbg(netdev, "Success End\n");
1998 return 0;
1999
2000err_up:
2001 if (!adapter->wake_up_evt)
2002 pch_gbe_phy_power_down(hw);
2003 pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
2004err_setup_rx:
2005 pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
2006err_setup_tx:
2007 pch_gbe_reset(adapter);
2008 netdev_err(netdev, "Error End\n");
2009 return err;
2010}
2017
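/**
 * pch_gbe_stop - Disables a network interface
 * @netdev:  Network interface device structure
 * Returns:
 *	0: Successfully
 */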
2018static int pch_gbe_stop(struct net_device *netdev)
2019{
2020 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2021 struct pch_gbe_hw *hw = &adapter->hw;
2022
2023 pch_gbe_down(adapter);
2024 if (!adapter->wake_up_evt)
2025 pch_gbe_phy_power_down(hw);
2026 pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
2027 pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
2028 return 0;
2029}
2038
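/**
 * pch_gbe_xmit_frame - Packet transmitting start
 * @skb:     Socket buffer structure
 * @netdev:  Network interface device structure
 * Returns:
 *	- NETDEV_TX_OK:   Normal end
 *	- NETDEV_TX_BUSY: Error end
 */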
2039static netdev_tx_t pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2040{
2041 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2042 struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
2043
2044 if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
2045 netif_stop_queue(netdev);
2046 netdev_dbg(netdev,
2047 "Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n",
2048 tx_ring->next_to_use, tx_ring->next_to_clean);
2049 return NETDEV_TX_BUSY;
2050 }
2051
2052
2053 pch_gbe_tx_queue(adapter, tx_ring, skb);
2054 return NETDEV_TX_OK;
2055}
2060
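/**
 * pch_gbe_set_multi - Multicast and Promiscuous mode set
 * @netdev:   Network interface device structure
 */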
2061static void pch_gbe_set_multi(struct net_device *netdev)
2062{
2063 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2064 struct pch_gbe_hw *hw = &adapter->hw;
2065 struct netdev_hw_addr *ha;
2066 u32 rctl, adrmask;
2067 int mc_count, i;
2068
2069 netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags);
2070
2071
2072 rctl = ioread32(&hw->reg->RX_MODE);
2073 rctl |= PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN;
2074
2075
2076 if (netdev->flags & IFF_PROMISC)
2077 rctl &= ~(PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
2081
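	/* If we want to monitor more multicast addresses than the hardware can
	 * support then disable hardware multicast filtering.
	 */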
2082 mc_count = netdev_mc_count(netdev);
2083 if ((netdev->flags & IFF_ALLMULTI) || mc_count >= PCH_GBE_MAR_ENTRIES)
2084 rctl &= ~PCH_GBE_MLT_FIL_EN;
2085
2086 iowrite32(rctl, &hw->reg->RX_MODE);
2090
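	/* If we're not using multicast filtering then there's no point
	 * configuring the unused MAC address registers.
	 */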
2091 if (!(rctl & PCH_GBE_MLT_FIL_EN))
2092 return;
2093
2094
2095
2096
2097 i = 1;
2098 netdev_for_each_mc_addr(ha, netdev)
2099 pch_gbe_mac_mar_set(hw, ha->addr, i++);
2100
2101
2102 for (; i < PCH_GBE_MAR_ENTRIES; i++) {
2103
2104 adrmask = ioread32(&hw->reg->ADDR_MASK);
2105 iowrite32(adrmask | BIT(i), &hw->reg->ADDR_MASK);
2106
2107 pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
2108
2109 iowrite32(0, &hw->reg->mac_adr[i].high);
2110 iowrite32(0, &hw->reg->mac_adr[i].low);
2111 }
2112
2113 netdev_dbg(netdev,
2114 "RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n",
2115 ioread32(&hw->reg->RX_MODE), mc_count);
2116}
2125
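/**
 * pch_gbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: Network interface device structure
 * @addr:   Pointer to an address structure
 * Returns:
 *	0:		Successfully
 *	-EADDRNOTAVAIL:	Failed
 */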
2126static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
2127{
2128 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2129 struct sockaddr *skaddr = addr;
2130 int ret_val;
2131
2132 if (!is_valid_ether_addr(skaddr->sa_data)) {
2133 ret_val = -EADDRNOTAVAIL;
2134 } else {
2135 eth_hw_addr_set(netdev, skaddr->sa_data);
2136 memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
2137 pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2138 ret_val = 0;
2139 }
2140 netdev_dbg(netdev, "ret_val : 0x%08x\n", ret_val);
2141 netdev_dbg(netdev, "dev_addr : %pM\n", netdev->dev_addr);
2142 netdev_dbg(netdev, "mac_addr : %pM\n", adapter->hw.mac.addr);
2143 netdev_dbg(netdev, "MAC_ADR1AB reg : 0x%08x 0x%08x\n",
2144 ioread32(&adapter->hw.reg->mac_adr[0].high),
2145 ioread32(&adapter->hw.reg->mac_adr[0].low));
2146 return ret_val;
2147}
2156
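/**
 * pch_gbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev:   Network interface device structure
 * @new_mtu:  New value for maximum frame size
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */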
static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
	int err;

	if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
	else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
	else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
	else
		adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;

	if (netif_running(netdev)) {
		pch_gbe_down(adapter);
		err = pch_gbe_up(adapter);
		if (err) {
			adapter->rx_buffer_len = old_rx_buffer_len;
			pch_gbe_up(adapter);
			return err;
		} else {
			netdev->mtu = new_mtu;
			adapter->hw.mac.max_frame_size = max_frame;
		}
	} else {
		pch_gbe_reset(adapter);
		netdev->mtu = new_mtu;
		adapter->hw.mac.max_frame_size = max_frame;
	}

	netdev_dbg(netdev,
		   "max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
		   max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
		   adapter->hw.mac.max_frame_size);
	return 0;
}

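/**
 * pch_gbe_set_features - Reset device after features changed
 * @netdev:   Network interface device structure
 * @features: New features
 * Returns:
 *	0: HW state updated successfully
 */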
static int pch_gbe_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	if (netif_running(netdev))
		pch_gbe_reinit_locked(adapter);
	else
		pch_gbe_reset(adapter);

	return 0;
}

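/**
 * pch_gbe_ioctl - Controls register through a MII interface
 * @netdev: Network interface device structure
 * @ifr:    Pointer to ifr structure
 * @cmd:    Control command
 * Returns:
 *	0:	Successfully
 *	Negative value:	Failed
 */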
static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "cmd : 0x%04x\n", cmd);

	if (cmd == SIOCSHWTSTAMP)
		return hwtstamp_ioctl(netdev, ifr, cmd);

	return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
}

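/**
 * pch_gbe_tx_timeout - Respond to a Tx Hang
 * @netdev:  Network interface device structure
 * @txqueue: index of the hanging queue (unused)
 */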
static void pch_gbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

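	/* Do the reset outside of interrupt context */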
	adapter->stats.tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

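/**
 * pch_gbe_napi_poll - NAPI receive and transmit polling callback
 * @napi:   Pointer to the polling device structure
 * @budget: Maximum number of Rx packets to process
 * Returns: the number of packets processed
 */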
static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
{
	struct pch_gbe_adapter *adapter =
		container_of(napi, struct pch_gbe_adapter, napi);
	int work_done = 0;
	bool poll_end_flag = false;
	bool cleaned = false;

	netdev_dbg(adapter->netdev, "budget : %d\n", budget);

	pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
	cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);

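	/* If Tx cleanup did any work, claim the full budget so NAPI
	 * schedules another poll.
	 */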
	if (cleaned)
		work_done = budget;

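	/* Rx ran out of work before the budget was used: leave polling
	 * mode and re-enable interrupts.
	 */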
	if (work_done < budget)
		poll_end_flag = true;

	if (poll_end_flag) {
		napi_complete_done(napi, work_done);
		pch_gbe_irq_enable(adapter);
	}

	if (adapter->rx_stop_flag) {
		adapter->rx_stop_flag = false;
		pch_gbe_enable_dma_rx(&adapter->hw);
	}

	netdev_dbg(adapter->netdev,
		   "poll_end_flag : %d work_done : %d budget : %d\n",
		   poll_end_flag, work_done, budget);

	return work_done;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
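/**
 * pch_gbe_netpoll - Used by things like netconsole to send skbs
 * @netdev: Network interface device structure
 *
 * Lets callers inject packets without re-enabling interrupts; it is not
 * called while the interrupt routine is executing.
 */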
static void pch_gbe_netpoll(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->irq);
	pch_gbe_intr(adapter->irq, netdev);
	enable_irq(adapter->irq);
}
#endif

static const struct net_device_ops pch_gbe_netdev_ops = {
	.ndo_open = pch_gbe_open,
	.ndo_stop = pch_gbe_stop,
	.ndo_start_xmit = pch_gbe_xmit_frame,
	.ndo_set_mac_address = pch_gbe_set_mac,
	.ndo_tx_timeout = pch_gbe_tx_timeout,
	.ndo_change_mtu = pch_gbe_change_mtu,
	.ndo_set_features = pch_gbe_set_features,
	.ndo_eth_ioctl = pch_gbe_ioctl,
	.ndo_set_rx_mode = pch_gbe_set_multi,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = pch_gbe_netpoll,
#endif
};

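/* PCI AER callbacks: detach on error, re-initialize on slot reset and
 * re-attach on resume.
 */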
static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);
	if (netif_running(netdev))
		pch_gbe_down(adapter);
	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);
	pch_gbe_phy_power_up(hw);
	pch_gbe_reset(adapter);

	pch_gbe_mac_set_wol_event(hw, 0);

	return PCI_ERS_RESULT_RECOVERED;
}

static void pch_gbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (pch_gbe_up(adapter)) {
			netdev_dbg(netdev,
				   "can't bring device back up after reset\n");
			return;
		}
	}
	netif_device_attach(netdev);
}

static int __pch_gbe_suspend(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 wufc = adapter->wake_up_evt;

	netif_device_detach(netdev);
	if (netif_running(netdev))
		pch_gbe_down(adapter);
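	/* If WoL events are enabled, keep the receiver and link configured
	 * so wake-up packets can still be detected; otherwise power the
	 * PHY down.
	 */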
	if (wufc) {
		pch_gbe_set_multi(netdev);
		pch_gbe_setup_rctl(adapter);
		pch_gbe_configure_rx(adapter);
		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
				       hw->mac.link_duplex);
		pch_gbe_set_mode(adapter, hw->mac.link_speed,
				 hw->mac.link_duplex);
		pch_gbe_mac_set_wol_event(hw, wufc);
		pci_disable_device(pdev);
	} else {
		pch_gbe_phy_power_down(hw);
		pch_gbe_mac_set_wol_event(hw, wufc);
		pci_disable_device(pdev);
	}
	return 0;
}

#ifdef CONFIG_PM
static int pch_gbe_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	return __pch_gbe_suspend(pdev);
}

static int pch_gbe_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		netdev_err(netdev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);
	pch_gbe_phy_power_up(hw);
	pch_gbe_reset(adapter);

	pch_gbe_mac_set_wol_event(hw, 0);

	if (netif_running(netdev))
		pch_gbe_up(adapter);
	netif_device_attach(netdev);

	return 0;
}
#endif

static void pch_gbe_shutdown(struct pci_dev *pdev)
{
	__pch_gbe_suspend(pdev);
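	/* When powering off, arm wake-up and drop the device into D3hot */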
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

static void pch_gbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	cancel_work_sync(&adapter->reset_task);
	unregister_netdev(netdev);

	pch_gbe_phy_hw_reset(&adapter->hw);

	free_netdev(netdev);
}

static int pch_gbe_probe(struct pci_dev *pdev,
			 const struct pci_device_id *pci_id)
{
	struct net_device *netdev;
	struct pch_gbe_adapter *adapter;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

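	/* Prefer 64-bit DMA; fall back to a 32-bit mask if that fails */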
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev, "ERR: No usable DMA configuration, aborting\n");
			return ret;
		}
	}

	ret = pcim_iomap_regions(pdev, 1 << PCH_GBE_PCI_BAR, pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev,
			"ERR: Can't reserve PCI I/O and memory resources\n");
		return ret;
	}
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct pch_gbe_adapter));
	if (!netdev)
		return -ENOMEM;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR];

	adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data;
	if (adapter->pdata && adapter->pdata->platform_init) {
		ret = adapter->pdata->platform_init(pdev);
		if (ret)
			goto err_free_netdev;
	}

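	/* The IEEE 1588 timestamp unit is a separate PCI function
	 * (device 12, function 4) on the same bus as the MAC.
	 */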
	adapter->ptp_pdev =
		pci_get_domain_bus_and_slot(pci_domain_nr(adapter->pdev->bus),
					    adapter->pdev->bus->number,
					    PCI_DEVFN(12, 4));

	netdev->netdev_ops = &pch_gbe_netdev_ops;
	netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
	netif_napi_add(netdev, &adapter->napi,
		       pch_gbe_napi_poll, NAPI_POLL_WEIGHT);
	netdev->hw_features = NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	netdev->features = netdev->hw_features;
	pch_gbe_set_ethtool_ops(netdev);

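	/* MTU range: 46 - 10300 */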
	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
	netdev->max_mtu = PCH_GBE_MAX_JUMBO_FRAME_SIZE -
			  (ETH_HLEN + ETH_FCS_LEN);

	pch_gbe_mac_load_mac_addr(&adapter->hw);
	pch_gbe_mac_reset_hw(&adapter->hw);

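	/* setup the private structure */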
	ret = pch_gbe_sw_init(adapter);
	if (ret)
		goto err_free_netdev;

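	/* Initialize PHY */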
	ret = pch_gbe_init_phy(adapter);
	if (ret) {
		dev_err(&pdev->dev, "PHY initialize error\n");
		goto err_free_adapter;
	}

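	/* Read the MAC address and store it in the private data */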
	ret = pch_gbe_mac_read_mac_addr(&adapter->hw);
	if (ret) {
		dev_err(&pdev->dev, "MAC address Read Error\n");
		goto err_free_adapter;
	}

	eth_hw_addr_set(netdev, adapter->hw.mac.addr);
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		/* If the MAC is invalid (or just missing), display a warning
		 * but do not abort setting up the device. pch_gbe_up will
		 * prevent the interface from being brought up until a valid
		 * MAC is set.
		 */
		dev_err(&pdev->dev, "Invalid MAC address, interface disabled\n");
	}
	timer_setup(&adapter->watchdog_timer, pch_gbe_watchdog, 0);

	INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);

	pch_gbe_check_options(adapter);

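	/* Initialize the default wake-on-LAN event settings */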
	adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
	dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);

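	/* reset the hardware with the new settings */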
	pch_gbe_reset(adapter);

	ret = register_netdev(netdev);
	if (ret)
		goto err_free_adapter;

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	dev_dbg(&pdev->dev, "PCH Network Connection\n");

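	/* Disable PHY hibernation on platforms that require it */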
	if (adapter->pdata && adapter->pdata->phy_disable_hibernate)
		pch_gbe_phy_disable_hibernate(&adapter->hw);

	device_set_wakeup_enable(&pdev->dev, 1);
	return 0;

err_free_adapter:
	pch_gbe_phy_hw_reset(&adapter->hw);
err_free_netdev:
	free_netdev(netdev);
	return ret;
}

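/* devm action callback: drop the GPIO lookup table when the device goes away */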
static void pch_gbe_gpio_remove_table(void *table)
{
	gpiod_remove_lookup_table(table);
}

static int pch_gbe_gpio_add_table(struct device *dev, void *table)
{
	gpiod_add_lookup_table(table);
	return devm_add_action_or_reset(dev, pch_gbe_gpio_remove_table, table);
}

static struct gpiod_lookup_table pch_gbe_minnow_gpio_table = {
	.dev_id = "0000:02:00.1",
	.table = {
		GPIO_LOOKUP("sch_gpio.33158", 13, NULL, GPIO_ACTIVE_LOW),
		{}
	},
};

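/* On the MinnowBoard the PHY reset line is wired to the (active-low) GPIO
 * described in the lookup table above; pulse it here so the PHY is in a
 * known-good state before the MAC starts using it.
 */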
static int pch_gbe_minnow_platform_init(struct pci_dev *pdev)
{
	struct gpio_desc *gpiod;
	int ret;

	ret = pch_gbe_gpio_add_table(&pdev->dev, &pch_gbe_minnow_gpio_table);
	if (ret)
		return ret;

	gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_OUT_HIGH);
	if (IS_ERR(gpiod))
		return dev_err_probe(&pdev->dev, PTR_ERR(gpiod),
				     "Can't request PHY reset GPIO line\n");

	gpiod_set_value(gpiod, 1);
	usleep_range(1250, 1500);
	gpiod_set_value(gpiod, 0);
	usleep_range(1250, 1500);

	return ret;
}

static struct pch_gbe_privdata pch_gbe_minnow_privdata = {
	.phy_tx_clk_delay = true,
	.phy_disable_hibernate = true,
	.platform_init = pch_gbe_minnow_platform_init,
};

static const struct pci_device_id pch_gbe_pcidev_id[] = {
	{.vendor = PCI_VENDOR_ID_INTEL,
	 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
	 .subvendor = PCI_VENDOR_ID_CIRCUITCO,
	 .subdevice = PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00),
	 .driver_data = (kernel_ulong_t)&pch_gbe_minnow_privdata
	 },
	{.vendor = PCI_VENDOR_ID_INTEL,
	 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	{.vendor = PCI_VENDOR_ID_ROHM,
	 .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	{.vendor = PCI_VENDOR_ID_ROHM,
	 .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },

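	/* required last entry */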
	{0}
};

#ifdef CONFIG_PM
static const struct dev_pm_ops pch_gbe_pm_ops = {
	.suspend = pch_gbe_suspend,
	.resume = pch_gbe_resume,
	.freeze = pch_gbe_suspend,
	.thaw = pch_gbe_resume,
	.poweroff = pch_gbe_suspend,
	.restore = pch_gbe_resume,
};
#endif

static const struct pci_error_handlers pch_gbe_err_handler = {
	.error_detected = pch_gbe_io_error_detected,
	.slot_reset = pch_gbe_io_slot_reset,
	.resume = pch_gbe_io_resume
};

static struct pci_driver pch_gbe_driver = {
	.name = KBUILD_MODNAME,
	.id_table = pch_gbe_pcidev_id,
	.probe = pch_gbe_probe,
	.remove = pch_gbe_remove,
#ifdef CONFIG_PM
	.driver.pm = &pch_gbe_pm_ops,
#endif
	.shutdown = pch_gbe_shutdown,
	.err_handler = &pch_gbe_err_handler
};
module_pci_driver(pch_gbe_driver);

MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);