// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1999 - 2010 Intel Corporation.
 * Copyright (C) 2010 - 2012 LAPIS SEMICONDUCTOR CO., LTD.
 *
 * This code was derived from the Intel e1000e Linux driver.
 */

#include "pch_gbe.h"
#include "pch_gbe_phy.h"

#include <linux/gpio/consumer.h>
#include <linux/gpio/machine.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_pch.h>
#include <linux/gpio.h>

#define PCH_GBE_MAR_ENTRIES		16
#define PCH_GBE_SHORT_PKT		64
#define DSC_INIT16			0xC000
#define PCH_GBE_DMA_ALIGN		0
#define PCH_GBE_DMA_PADDING		2
#define PCH_GBE_WATCHDOG_PERIOD		(5 * HZ)
#define PCH_GBE_PCI_BAR			1
#define PCH_GBE_RESERVE_MEMORY		0x200000

#define PCI_DEVICE_ID_INTEL_IOH1_GBE	0x8802

#define PCI_DEVICE_ID_ROHM_ML7223_GBE	0x8013
#define PCI_DEVICE_ID_ROHM_ML7831_GBE	0x8802

#define PCH_GBE_TX_WEIGHT		64
#define PCH_GBE_RX_WEIGHT		64
#define PCH_GBE_RX_BUFFER_WRITE		16

#define PCH_GBE_WL_INIT_SETTING		(PCH_GBE_WLC_MP)

#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
	PCH_GBE_CHIP_TYPE_INTERNAL | \
	PCH_GBE_RGMII_MODE_RGMII     \
	)

#define PCH_GBE_MAX_RX_BUFFER_SIZE	0x2880
#define PCH_GBE_MAX_JUMBO_FRAME_SIZE	10318
#define PCH_GBE_FRAME_SIZE_2048		2048
#define PCH_GBE_FRAME_SIZE_4096		4096
#define PCH_GBE_FRAME_SIZE_8192		8192

#define PCH_GBE_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
#define PCH_GBE_RX_DESC(R, i)		PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
#define PCH_GBE_TX_DESC(R, i)		PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
#define PCH_GBE_DESC_UNUSED(R) \
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	(R)->next_to_clean - (R)->next_to_use - 1)

#define PCH_GBE_PAUSE_PKT1_VALUE	0x00C28001
#define PCH_GBE_PAUSE_PKT2_VALUE	0x00000100
#define PCH_GBE_PAUSE_PKT4_VALUE	0x01000888
#define PCH_GBE_PAUSE_PKT5_VALUE	0x0000FFFF

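/* Bits enabled in the Interrupt Enable register; the names mirror the
 * PCH_GBE_INT_* status bits: Rx DMA completion, Rx descriptor empty,
 * Rx FIFO error, wake-on-LAN detection and Tx completion.
 */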
#define PCH_GBE_INT_ENABLE_MASK ( \
	PCH_GBE_INT_RX_DMA_CMPLT | \
	PCH_GBE_INT_RX_DSC_EMP | \
	PCH_GBE_INT_RX_FIFO_ERR | \
	PCH_GBE_INT_WOL_DET | \
	PCH_GBE_INT_TX_CMPLT \
	)

#define PCH_GBE_INT_DISABLE_ALL		0

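/* IEEE 1588: Time Synchronization Channel Control register bits */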
#define MASTER_MODE		(1<<0)
#define SLAVE_MODE		(0)
#define V2_MODE			(1<<31)
#define CAP_MODE0		(0)
#define CAP_MODE2		(1<<17)

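/* IEEE 1588: Time Synchronization Channel Event register bits */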
#define TX_SNAPSHOT_LOCKED	(1<<0)
#define RX_SNAPSHOT_LOCKED	(1<<1)

#define PTP_L4_MULTICAST_SA	"01:00:5e:00:01:81"
#define PTP_L2_MULTICAST_SA	"01:1b:19:00:00:00"

static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
			       int data);
static void pch_gbe_set_multi(struct net_device *netdev);

static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
	u8 *data = skb->data;
	unsigned int offset;
	u16 hi, id;
	u32 lo;

	if (ptp_classify_raw(skb) == PTP_CLASS_NONE)
		return 0;

	offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;

	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
		return 0;

	hi = get_unaligned_be16(data + offset + OFF_PTP_SOURCE_UUID + 0);
	lo = get_unaligned_be32(data + offset + OFF_PTP_SOURCE_UUID + 2);
	id = get_unaligned_be16(data + offset + OFF_PTP_SEQUENCE_ID);

	return (uid_hi == hi && uid_lo == lo && seqid == id);
}

static void
pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct pci_dev *pdev;
	u64 ns;
	u32 hi, lo, val;

	if (!adapter->hwts_rx_en)
		return;

	pdev = adapter->ptp_pdev;

	val = pch_ch_event_read(pdev);

	if (!(val & RX_SNAPSHOT_LOCKED))
		return;

	lo = pch_src_uuid_lo_read(pdev);
	hi = pch_src_uuid_hi_read(pdev);

	if (!pch_ptp_match(skb, hi, lo, hi >> 16))
		goto out;

	ns = pch_rx_snap_read(pdev);

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
	pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
}

static void
pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct pci_dev *pdev;
	struct skb_shared_info *shtx;
	u64 ns;
	u32 cnt, val;

	shtx = skb_shinfo(skb);
	if (likely(!(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)))
		return;

	shtx->tx_flags |= SKBTX_IN_PROGRESS;

	pdev = adapter->ptp_pdev;

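	/* Poll for the Tx time stamp snapshot; the loop below gives the
	 * hardware roughly 100 us (100 x udelay(1)) to latch it before
	 * giving up.
	 */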
	for (cnt = 0; cnt < 100; cnt++) {
		val = pch_ch_event_read(pdev);
		if (val & TX_SNAPSHOT_LOCKED)
			break;
		udelay(1);
	}
	if (!(val & TX_SNAPSHOT_LOCKED)) {
		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
		return;
	}

	ns = pch_tx_snap_read(pdev);

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);

	pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED);
}

static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config cfg;
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev;
	u8 station[20];

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags)
		return -EINVAL;

	pdev = adapter->ptp_pdev;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		adapter->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		adapter->hwts_rx_en = 0;
		pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
		strcpy(station, PTP_L4_MULTICAST_SA);
		pch_set_station_address(station, pdev);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
		strcpy(station, PTP_L2_MULTICAST_SA);
		pch_set_station_address(station, pdev);
		break;
	default:
		return -ERANGE;
	}

	adapter->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;

	pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
{
	iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
}

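/**
 * pch_gbe_mac_read_mac_addr - Read MAC address from the first address register
 * @hw:	Pointer to the HW structure
 * Returns:
 *	0:	Successful.
 */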
static s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
{
	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
	u32 adr1a, adr1b;

	adr1a = ioread32(&hw->reg->mac_adr[0].high);
	adr1b = ioread32(&hw->reg->mac_adr[0].low);

	hw->mac.addr[0] = (u8)(adr1a & 0xFF);
	hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
	hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
	hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
	hw->mac.addr[4] = (u8)(adr1b & 0xFF);
	hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);

	netdev_dbg(adapter->netdev, "hw->mac.addr : %pM\n", hw->mac.addr);
	return 0;
}

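/**
 * pch_gbe_wait_clr_bit - Wait for a bit to clear
 * @reg:	Pointer of register
 * @bit:	Busy bit
 */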
static void pch_gbe_wait_clr_bit(void __iomem *reg, u32 bit)
{
	u32 tmp;

	if (readx_poll_timeout_atomic(ioread32, reg, tmp, !(tmp & bit), 0, 10))
		pr_err("Error: busy bit is not cleared\n");
}

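/**
 * pch_gbe_mac_mar_set - Set MAC address register
 * @hw:	    Pointer to the HW structure
 * @addr:   Pointer to the MAC address
 * @index:  MAC address array register
 */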
static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 *addr, u32 index)
{
	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
	u32 mar_low, mar_high, adrmask;

	netdev_dbg(adapter->netdev, "index : 0x%x\n", index);

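	/* The register layout wants the address bytes packed low-to-high,
	 * so reverse the byte order from network order (big endian).
	 */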
	mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
		    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));

	adrmask = ioread32(&hw->reg->ADDR_MASK);
	iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);

	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);

	iowrite32(mar_high, &hw->reg->mac_adr[index].high);
	iowrite32(mar_low, &hw->reg->mac_adr[index].low);

	iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);

	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}

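/**
 * pch_gbe_mac_reset_hw - Reset hardware
 * @hw:	Pointer to the HW structure
 */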
static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
{
	pch_gbe_mac_read_mac_addr(hw);
	iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
	iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
	pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);

	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
}

static void pch_gbe_disable_mac_rx(struct pch_gbe_hw *hw)
{
	u32 rctl;

	rctl = ioread32(&hw->reg->MAC_RX_EN);
	iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
}

static void pch_gbe_enable_mac_rx(struct pch_gbe_hw *hw)
{
	u32 rctl;

	rctl = ioread32(&hw->reg->MAC_RX_EN);
	iowrite32((rctl | PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
}

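/**
 * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
 * @hw:	Pointer to the HW structure
 * @mar_count: Receive address registers
 */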
static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
{
	u32 i;

	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);

	for (i = 1; i < mar_count; i++) {
		iowrite32(0, &hw->reg->mac_adr[i].high);
		iowrite32(0, &hw->reg->mac_adr[i].low);
	}
	iowrite32(0xFFFE, &hw->reg->ADDR_MASK);

	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}

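/**
 * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
 * @hw:	Pointer to the HW structure
 * Returns:
 *	0:		Successful.
 *	Negative value:	Failed.
 */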
s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
{
	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
	struct pch_gbe_mac_info *mac = &hw->mac;
	u32 rx_fctrl;

	netdev_dbg(adapter->netdev, "mac->fc = %u\n", mac->fc);

	rx_fctrl = ioread32(&hw->reg->RX_FCTRL);

	switch (mac->fc) {
	case PCH_GBE_FC_NONE:
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = false;
		break;
	case PCH_GBE_FC_RX_PAUSE:
		rx_fctrl |= PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = false;
		break;
	case PCH_GBE_FC_TX_PAUSE:
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = true;
		break;
	case PCH_GBE_FC_FULL:
		rx_fctrl |= PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = true;
		break;
	default:
		netdev_err(adapter->netdev,
			   "Flow control param set incorrectly\n");
		return -EINVAL;
	}
	if (mac->link_duplex == DUPLEX_HALF)
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
	iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
	netdev_dbg(adapter->netdev,
		   "RX_FCTRL reg : 0x%08x  mac->tx_fc_enable : %d\n",
		   ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
	return 0;
}

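/**
 * pch_gbe_mac_set_wol_event - Set wake-on-LAN event
 * @hw:     Pointer to the HW structure
 * @wu_evt: Wake up event
 */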
static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
{
	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
	u32 addr_mask;

	netdev_dbg(adapter->netdev, "wu_evt : 0x%08x  ADDR_MASK reg : 0x%08x\n",
		   wu_evt, ioread32(&hw->reg->ADDR_MASK));

	if (wu_evt) {
		addr_mask = ioread32(&hw->reg->ADDR_MASK);
		iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);

		pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
		iowrite32(0, &hw->reg->WOL_ST);
		iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
		iowrite32(0x02, &hw->reg->TCPIP_ACC);
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	} else {
		iowrite32(0, &hw->reg->WOL_CTRL);
		iowrite32(0, &hw->reg->WOL_ST);
	}
}

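/**
 * pch_gbe_mac_ctrl_miim - Control the MIIM process
 * @hw:   Pointer to the HW structure
 * @addr: Address of PHY
 * @dir:  Operation (write or read)
 * @reg:  Access register of PHY
 * @data: Write data
 *
 * Returns: Read data
 */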
u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
			  u16 data)
{
	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
	unsigned long flags;
	u32 data_out;

	spin_lock_irqsave(&hw->miim_lock, flags);

	if (readx_poll_timeout_atomic(ioread32, &hw->reg->MIIM, data_out,
				      data_out & PCH_GBE_MIIM_OPER_READY, 20, 2000)) {
		netdev_err(adapter->netdev, "pch-gbe.miim won't go Ready\n");
		spin_unlock_irqrestore(&hw->miim_lock, flags);
		return 0;
	}
	iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
		   (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
		   dir | data), &hw->reg->MIIM);
	readx_poll_timeout_atomic(ioread32, &hw->reg->MIIM, data_out,
				  data_out & PCH_GBE_MIIM_OPER_READY, 20, 2000);
	spin_unlock_irqrestore(&hw->miim_lock, flags);

	netdev_dbg(adapter->netdev, "PHY %s: reg=%d, data=0x%04X\n",
		   dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
		   dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
	return (u16)data_out;
}

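/**
 * pch_gbe_mac_set_pause_packet - Set pause packet
 * @hw:	Pointer to the HW structure
 */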
static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
{
	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
	unsigned long tmp2, tmp3;

	tmp2 = hw->mac.addr[1];
	tmp2 = (tmp2 << 8) | hw->mac.addr[0];
	tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);

	tmp3 = hw->mac.addr[5];
	tmp3 = (tmp3 << 8) | hw->mac.addr[4];
	tmp3 = (tmp3 << 8) | hw->mac.addr[3];
	tmp3 = (tmp3 << 8) | hw->mac.addr[2];

	iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
	iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
	iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
	iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
	iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);

	iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);

	netdev_dbg(adapter->netdev,
		   "PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
		   ioread32(&hw->reg->PAUSE_PKT1),
		   ioread32(&hw->reg->PAUSE_PKT2),
		   ioread32(&hw->reg->PAUSE_PKT3),
		   ioread32(&hw->reg->PAUSE_PKT4),
		   ioread32(&hw->reg->PAUSE_PKT5));
}

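/**
 * pch_gbe_alloc_queues - Allocate memory for all rings
 * @adapter:  Board private structure to initialize
 * Returns:
 *	0:	Successfully
 *	Negative value:	Failed
 */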
static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
{
	adapter->tx_ring = devm_kzalloc(&adapter->pdev->dev,
					sizeof(*adapter->tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = devm_kzalloc(&adapter->pdev->dev,
					sizeof(*adapter->rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		return -ENOMEM;
	return 0;
}

static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
{
	memset(&adapter->stats, 0, sizeof(adapter->stats));
}

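/**
 * pch_gbe_init_phy - Initialize PHY
 * @adapter:  Board private structure to initialize
 * Returns:
 *	0:	Successfully
 *	Negative value:	Failed
 */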
static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 addr;
	u16 bmcr, stat;

	for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
		adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
		stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
		stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	adapter->hw.phy.addr = adapter->mii.phy_id;
	netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id);
	if (addr == PCH_GBE_PHY_REGS_LEN)
		return -EAGAIN;

	for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
		if (addr != adapter->mii.phy_id) {
			pch_gbe_mdio_write(netdev, addr, MII_BMCR,
					   BMCR_ISOLATE);
		} else {
			bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
			pch_gbe_mdio_write(netdev, addr, MII_BMCR,
					   bmcr & ~BMCR_ISOLATE);
		}
	}

	adapter->mii.phy_id_mask = 0x1F;
	adapter->mii.reg_num_mask = 0x1F;
	adapter->mii.dev = adapter->netdev;
	adapter->mii.mdio_read = pch_gbe_mdio_read;
	adapter->mii.mdio_write = pch_gbe_mdio_write;
	adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
	return 0;
}

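/**
 * pch_gbe_mdio_read - The read function for mii
 * @netdev: Network interface device structure
 * @addr:   PHY ID
 * @reg:    Access location
 * Returns:
 *	Read data
 */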
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
				     (u16) 0);
}

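/**
 * pch_gbe_mdio_write - The write function for mii
 * @netdev: Network interface device structure
 * @addr:   PHY ID
 * @reg:    Access location
 * @data:   Write data
 */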
static void pch_gbe_mdio_write(struct net_device *netdev,
			       int addr, int reg, int data)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
}

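/**
 * pch_gbe_reset_task - Reset processing at the time of transmission timeout
 * @work:  Pointer of board private structure
 */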
static void pch_gbe_reset_task(struct work_struct *work)
{
	struct pch_gbe_adapter *adapter;

	adapter = container_of(work, struct pch_gbe_adapter, reset_task);

	rtnl_lock();
	pch_gbe_reinit_locked(adapter);
	rtnl_unlock();
}

void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
{
	pch_gbe_down(adapter);
	pch_gbe_up(adapter);
}

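/**
 * pch_gbe_reset - Reset GbE
 * @adapter:  Board private structure
 */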
void pch_gbe_reset(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_hw *hw = &adapter->hw;
	s32 ret_val;

	pch_gbe_mac_reset_hw(hw);

	pch_gbe_set_multi(netdev);

	pch_gbe_mac_init_rx_addrs(hw, PCH_GBE_MAR_ENTRIES);

	ret_val = pch_gbe_phy_get_id(hw);
	if (ret_val) {
		netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n");
		return;
	}
	pch_gbe_phy_init_setting(hw);

	pch_gbe_phy_set_rgmii(hw);
}

static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->irq, netdev);
	pci_free_irq_vectors(adapter->pdev);
}

static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	atomic_inc(&adapter->irq_sem);
	iowrite32(0, &hw->reg->INT_EN);
	ioread32(&hw->reg->INT_ST);
	synchronize_irq(adapter->irq);

	netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n",
		   ioread32(&hw->reg->INT_EN));
}

static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	if (likely(atomic_dec_and_test(&adapter->irq_sem)))
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	ioread32(&hw->reg->INT_ST);
	netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n",
		   ioread32(&hw->reg->INT_EN));
}

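/**
 * pch_gbe_setup_tctl - configure the Transmit control registers
 * @adapter:  Board private structure
 */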
static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 tx_mode, tcpip;

	tx_mode = PCH_GBE_TM_LONG_PKT |
		PCH_GBE_TM_ST_AND_FD |
		PCH_GBE_TM_SHORT_PKT |
		PCH_GBE_TM_TH_TX_STRT_8 |
		PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;

	iowrite32(tx_mode, &hw->reg->TX_MODE);

	tcpip = ioread32(&hw->reg->TCPIP_ACC);
	tcpip |= PCH_GBE_TX_TCPIPACC_EN;
	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
}

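/**
 * pch_gbe_configure_tx - Configure Transmit Unit after Reset
 * @adapter:  Board private structure
 */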
static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 tdba, tdlen, dctrl;

	netdev_dbg(adapter->netdev, "dma addr = 0x%08llx  size = 0x%08x\n",
		   (unsigned long long)adapter->tx_ring->dma,
		   adapter->tx_ring->size);

	tdba = adapter->tx_ring->dma;
	tdlen = adapter->tx_ring->size - 0x10;
	iowrite32(tdba, &hw->reg->TX_DSC_BASE);
	iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
	iowrite32(tdba, &hw->reg->TX_DSC_SW_P);

	dctrl = ioread32(&hw->reg->DMA_CTRL);
	dctrl |= PCH_GBE_TX_DMA_EN;
	iowrite32(dctrl, &hw->reg->DMA_CTRL);
}

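/**
 * pch_gbe_setup_rctl - Configure the receive control registers
 * @adapter:  Board private structure
 */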
static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rx_mode, tcpip;

	rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
		PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;

	iowrite32(rx_mode, &hw->reg->RX_MODE);

	tcpip = ioread32(&hw->reg->TCPIP_ACC);

	tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
	tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
}

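/**
 * pch_gbe_configure_rx - Configure Receive Unit after Reset
 * @adapter:  Board private structure
 */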
static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rdba, rdlen, rxdma;

	netdev_dbg(adapter->netdev, "dma adr = 0x%08llx  size = 0x%08x\n",
		   (unsigned long long)adapter->rx_ring->dma,
		   adapter->rx_ring->size);

	pch_gbe_mac_force_mac_fc(hw);

	pch_gbe_disable_mac_rx(hw);

	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma &= ~PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);

	netdev_dbg(adapter->netdev,
		   "MAC_RX_EN reg = 0x%08x  DMA_CTRL reg = 0x%08x\n",
		   ioread32(&hw->reg->MAC_RX_EN),
		   ioread32(&hw->reg->DMA_CTRL));

	rdba = adapter->rx_ring->dma;
	rdlen = adapter->rx_ring->size - 0x10;
	iowrite32(rdba, &hw->reg->RX_DSC_BASE);
	iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
	iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
}

static void pch_gbe_unmap_and_free_tx_resource(
	struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
{
	if (buffer_info->mapped) {
		dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->mapped = false;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}

static void pch_gbe_unmap_and_free_rx_resource(
	struct pch_gbe_adapter *adapter,
	struct pch_gbe_buffer *buffer_info)
{
	if (buffer_info->mapped) {
		dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->mapped = false;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}

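/**
 * pch_gbe_clean_tx_ring - Free Tx Buffers
 * @adapter:  Board private structure
 * @tx_ring:  Ring to be cleaned
 */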
static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
				  struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
	}
	netdev_dbg(adapter->netdev,
		   "call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);

	size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
	iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
}

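/**
 * pch_gbe_clean_rx_ring - Free Rx Buffers
 * @adapter:  Board private structure
 * @rx_ring:  Ring to free buffers from
 */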
static void
pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
		      struct pch_gbe_rx_ring *rx_ring)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
	}
	netdev_dbg(adapter->netdev,
		   "call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
	size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	memset(rx_ring->desc, 0, rx_ring->size);
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
	iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
}

static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
				   u16 duplex)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	unsigned long rgmii = 0;

	switch (speed) {
	case SPEED_10:
		rgmii = (PCH_GBE_RGMII_RATE_2_5M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	case SPEED_100:
		rgmii = (PCH_GBE_RGMII_RATE_25M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	case SPEED_1000:
		rgmii = (PCH_GBE_RGMII_RATE_125M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	}
	iowrite32(rgmii, &hw->reg->RGMII_CTRL);
}

static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
			     u16 duplex)
{
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_hw *hw = &adapter->hw;
	unsigned long mode = 0;

	switch (speed) {
	case SPEED_10:
		mode = PCH_GBE_MODE_MII_ETHER;
		netdev->tx_queue_len = 10;
		break;
	case SPEED_100:
		mode = PCH_GBE_MODE_MII_ETHER;
		netdev->tx_queue_len = 100;
		break;
	case SPEED_1000:
		mode = PCH_GBE_MODE_GMII_ETHER;
		break;
	}
	if (duplex == DUPLEX_FULL)
		mode |= PCH_GBE_MODE_FULL_DUPLEX;
	else
		mode |= PCH_GBE_MODE_HALF_DUPLEX;
	iowrite32(mode, &hw->reg->MODE);
}

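/**
 * pch_gbe_watchdog - Watchdog process
 * @t:  timer list containing a Board private structure
 */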
static void pch_gbe_watchdog(struct timer_list *t)
{
	struct pch_gbe_adapter *adapter = from_timer(adapter, t,
						     watchdog_timer);
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_hw *hw = &adapter->hw;

	netdev_dbg(netdev, "right now = %ld\n", jiffies);

	pch_gbe_update_stats(adapter);
	if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
		struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };

		netdev->tx_queue_len = adapter->tx_queue_len;

		mii_ethtool_gset(&adapter->mii, &cmd);
		hw->mac.link_speed = ethtool_cmd_speed(&cmd);
		hw->mac.link_duplex = cmd.duplex;

		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
				       hw->mac.link_duplex);

		pch_gbe_set_mode(adapter, hw->mac.link_speed,
				 hw->mac.link_duplex);
		netdev_dbg(netdev,
			   "Link is Up %d Mbps %s-Duplex\n",
			   hw->mac.link_speed,
			   cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
		netif_carrier_on(netdev);
		netif_wake_queue(netdev);
	} else if ((!mii_link_ok(&adapter->mii)) &&
		   (netif_carrier_ok(netdev))) {
		netdev_dbg(netdev, "NIC Link is Down\n");
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_HALF;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}
	mod_timer(&adapter->watchdog_timer,
		  round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
}

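/**
 * pch_gbe_tx_queue - Carry out queuing of the transmission data
 * @adapter:  Board private structure
 * @tx_ring:  Tx descriptor ring structure
 * @skb:      Sockt buffer structure
 */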
static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
			     struct pch_gbe_tx_ring *tx_ring,
			     struct sk_buff *skb)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_tx_desc *tx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *tmp_skb;
	unsigned int frame_ctrl;
	unsigned int ring_num;

	frame_ctrl = 0;
	if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
		frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
	if (skb->ip_summed == CHECKSUM_NONE)
		frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;

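	/* Perform checksum processing in software: the hardware checksum
	 * accelerator cannot handle frames shorter than 64 bytes
	 * (PCH_GBE_SHORT_PKT), so compute TCP/UDP checksums here for
	 * such packets and turn the accelerator off.
	 */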
	if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
		frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
			      PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			unsigned int offset;

			offset = skb_transport_offset(skb);
			if (iph->protocol == IPPROTO_TCP) {
				skb->csum = 0;
				tcp_hdr(skb)->check = 0;
				skb->csum = skb_checksum(skb, offset,
							 skb->len - offset, 0);
				tcp_hdr(skb)->check =
					csum_tcpudp_magic(iph->saddr,
							  iph->daddr,
							  skb->len - offset,
							  IPPROTO_TCP,
							  skb->csum);
			} else if (iph->protocol == IPPROTO_UDP) {
				skb->csum = 0;
				udp_hdr(skb)->check = 0;
				skb->csum =
					skb_checksum(skb, offset,
						     skb->len - offset, 0);
				udp_hdr(skb)->check =
					csum_tcpudp_magic(iph->saddr,
							  iph->daddr,
							  skb->len - offset,
							  IPPROTO_UDP,
							  skb->csum);
			}
		}
	}

	ring_num = tx_ring->next_to_use;
	if (unlikely((ring_num + 1) == tx_ring->count))
		tx_ring->next_to_use = 0;
	else
		tx_ring->next_to_use = ring_num + 1;

	buffer_info = &tx_ring->buffer_info[ring_num];
	tmp_skb = buffer_info->skb;

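	/* [Header:14][payload] ---> [Header:14][padding:2][payload] */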
	memcpy(tmp_skb->data, skb->data, ETH_HLEN);
	tmp_skb->data[ETH_HLEN] = 0x00;
	tmp_skb->data[ETH_HLEN + 1] = 0x00;
	tmp_skb->len = skb->len;
	memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
	       (skb->len - ETH_HLEN));

	buffer_info->length = tmp_skb->len;
	buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
					  buffer_info->length,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
		netdev_err(adapter->netdev, "TX DMA map failed\n");
		buffer_info->dma = 0;
		buffer_info->time_stamp = 0;
		tx_ring->next_to_use = ring_num;
		return;
	}
	buffer_info->mapped = true;
	buffer_info->time_stamp = jiffies;

	tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
	tx_desc->buffer_addr = (buffer_info->dma);
	tx_desc->length = (tmp_skb->len);
	tx_desc->tx_words_eob = ((tmp_skb->len + 3));
	tx_desc->tx_frame_ctrl = (frame_ctrl);
	tx_desc->gbec_status = (DSC_INIT16);

	if (unlikely(++ring_num == tx_ring->count))
		ring_num = 0;

	iowrite32(tx_ring->dma +
		  (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
		  &hw->reg->TX_DSC_SW_P);

	pch_tx_timestamp(adapter, skb);

	dev_kfree_skb_any(skb);
}

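/**
 * pch_gbe_update_stats - Update the board statistics counters
 * @adapter:  Board private structure
 */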
void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_hw_stats *stats = &adapter->stats;
	unsigned long flags;

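	/* Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */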
	if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
		return;

	spin_lock_irqsave(&adapter->stats_lock, flags);

	stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
	stats->tx_errors = stats->tx_length_errors +
	    stats->tx_aborted_errors +
	    stats->tx_carrier_errors + stats->tx_timeout_count;

	netdev->stats.rx_packets = stats->rx_packets;
	netdev->stats.rx_bytes = stats->rx_bytes;
	netdev->stats.rx_dropped = stats->rx_dropped;
	netdev->stats.tx_packets = stats->tx_packets;
	netdev->stats.tx_bytes = stats->tx_bytes;
	netdev->stats.tx_dropped = stats->tx_dropped;

	netdev->stats.multicast = stats->multicast;
	netdev->stats.collisions = stats->collisions;

	netdev->stats.rx_errors = stats->rx_errors;
	netdev->stats.rx_crc_errors = stats->rx_crc_errors;
	netdev->stats.rx_frame_errors = stats->rx_frame_errors;

	netdev->stats.tx_errors = stats->tx_errors;
	netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
	netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}

static void pch_gbe_disable_dma_rx(struct pch_gbe_hw *hw)
{
	u32 rxdma;

	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma &= ~PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);
}

static void pch_gbe_enable_dma_rx(struct pch_gbe_hw *hw)
{
	u32 rxdma;

	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma |= PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);
}

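/**
 * pch_gbe_intr - Interrupt Handler
 * @irq:   Interrupt number
 * @data:  Pointer to the network interface device structure
 * Returns:
 *	IRQ_HANDLED if the interrupt was ours, IRQ_NONE otherwise
 */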
static irqreturn_t pch_gbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 int_st;
	u32 int_en;

	int_st = ioread32(&hw->reg->INT_ST);
	int_st = int_st & ioread32(&hw->reg->INT_EN);

	if (unlikely(!int_st))
		return IRQ_NONE;
	netdev_dbg(netdev, "%s occur int_st = 0x%08x\n", __func__, int_st);
	if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
		adapter->stats.intr_rx_frame_err_count++;
	if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
		if (!adapter->rx_stop_flag) {
			adapter->stats.intr_rx_fifo_err_count++;
			netdev_dbg(netdev, "Rx fifo over run\n");
			adapter->rx_stop_flag = true;
			int_en = ioread32(&hw->reg->INT_EN);
			iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
				  &hw->reg->INT_EN);
			pch_gbe_disable_dma_rx(&adapter->hw);
			int_st |= ioread32(&hw->reg->INT_ST);
			int_st = int_st & ioread32(&hw->reg->INT_EN);
		}
	if (int_st & PCH_GBE_INT_RX_DMA_ERR)
		adapter->stats.intr_rx_dma_err_count++;
	if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
		adapter->stats.intr_tx_fifo_err_count++;
	if (int_st & PCH_GBE_INT_TX_DMA_ERR)
		adapter->stats.intr_tx_dma_err_count++;
	if (int_st & PCH_GBE_INT_TCPIP_ERR)
		adapter->stats.intr_tcpip_err_count++;

	if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
		adapter->stats.intr_rx_dsc_empty_count++;
		netdev_dbg(netdev, "Rx descriptor is empty\n");
		int_en = ioread32(&hw->reg->INT_EN);
		iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
		if (hw->mac.tx_fc_enable)
			pch_gbe_mac_set_pause_packet(hw);
	}

	if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
	    (adapter->rx_stop_flag)) {
		if (likely(napi_schedule_prep(&adapter->napi))) {
			atomic_inc(&adapter->irq_sem);
			int_en = ioread32(&hw->reg->INT_EN);
			int_en &=
			    ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
			iowrite32(int_en, &hw->reg->INT_EN);

			__napi_schedule(&adapter->napi);
		}
	}
	netdev_dbg(netdev, "return = 0x%08x  INT_EN reg = 0x%08x\n",
		   IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
	return IRQ_HANDLED;
}

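/**
 * pch_gbe_alloc_rx_buffers - Replace used receive buffers
 * @adapter:       Board private structure
 * @rx_ring:       Rx descriptor ring
 * @cleaned_count: Cleaned count
 */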
static void
pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
			 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_rx_desc *rx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz;

	bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
	i = rx_ring->next_to_use;

	while ((cleaned_count--)) {
		buffer_info = &rx_ring->buffer_info[i];
		skb = netdev_alloc_skb(netdev, bufsz);
		if (unlikely(!skb)) {
			adapter->stats.rx_alloc_buff_failed++;
			break;
		}

		skb_reserve(skb, NET_IP_ALIGN);
		buffer_info->skb = skb;

		buffer_info->dma = dma_map_single(&pdev->dev,
						  buffer_info->rx_buffer,
						  buffer_info->length,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
			buffer_info->dma = 0;
			adapter->stats.rx_alloc_buff_failed++;
			break;
		}
		buffer_info->mapped = true;
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = (buffer_info->dma);
		rx_desc->gbec_status = DSC_INIT16;

		netdev_dbg(netdev,
			   "i = %d  buffer_info->dma = 0x%08llx  buffer_info->length = 0x%x\n",
			   i, (unsigned long long)buffer_info->dma,
			   buffer_info->length);

		if (unlikely(++i == rx_ring->count))
			i = 0;
	}
	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);
		iowrite32(rx_ring->dma +
			  (int)sizeof(struct pch_gbe_rx_desc) * i,
			  &hw->reg->RX_DSC_SW_P);
	}
}

static int
pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
			      struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_buffer *buffer_info;
	unsigned int i;
	unsigned int bufsz;
	unsigned int size;

	bufsz = adapter->rx_buffer_len;

	size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
	rx_ring->rx_buff_pool =
		dma_alloc_coherent(&pdev->dev, size,
				   &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
	if (!rx_ring->rx_buff_pool)
		return -ENOMEM;

	rx_ring->rx_buff_pool_size = size;
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
		buffer_info->length = bufsz;
	}
	return 0;
}

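/**
 * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
 * @adapter:  Board private structure
 * @tx_ring:  Tx descriptor ring
 */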
static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
				     struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz;
	struct pch_gbe_tx_desc *tx_desc;

	bufsz =
	    adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		skb = netdev_alloc_skb(adapter->netdev, bufsz);
		skb_reserve(skb, PCH_GBE_DMA_ALIGN);
		buffer_info->skb = skb;
		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
		tx_desc->gbec_status = (DSC_INIT16);
	}
}

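/**
 * pch_gbe_clean_tx - Reclaim resources after transmit completes
 * @adapter:  Board private structure
 * @tx_ring:  Tx descriptor ring
 * Returns:
 *	true:  Cleaned the descriptor
 *	false: Not cleaned the descriptor
 */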
static bool
pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
		 struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_tx_desc *tx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int cleaned_count = 0;
	bool cleaned = false;
	int unused, thresh;

	netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
		   tx_ring->next_to_clean);

	i = tx_ring->next_to_clean;
	tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
	netdev_dbg(adapter->netdev, "gbec_status:0x%04x  dma_status:0x%04x\n",
		   tx_desc->gbec_status, tx_desc->dma_status);

	unused = PCH_GBE_DESC_UNUSED(tx_ring);
	thresh = tx_ring->count - PCH_GBE_TX_WEIGHT;
	if ((tx_desc->gbec_status == DSC_INIT16) && (unused < thresh)) {
		int j, k;

		if (unused < 8) {
			netdev_dbg(adapter->netdev,
				   "clean_tx: transmit queue warning (%x,%x) unused=%d\n",
				   tx_ring->next_to_clean, tx_ring->next_to_use,
				   unused);
		}

		k = i;
		for (j = 0; j < PCH_GBE_TX_WEIGHT; j++) {
			tx_desc = PCH_GBE_TX_DESC(*tx_ring, k);
			if (tx_desc->gbec_status != DSC_INIT16)
				break;
			if (++k >= tx_ring->count)
				k = 0;
		}
		if (j < PCH_GBE_TX_WEIGHT) {
			netdev_dbg(adapter->netdev,
				   "clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n",
				   unused, j, i, k, tx_ring->next_to_use,
				   tx_desc->gbec_status);
			i = k;
		}
	}

	while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
		netdev_dbg(adapter->netdev, "gbec_status:0x%04x\n",
			   tx_desc->gbec_status);
		buffer_info = &tx_ring->buffer_info[i];
		skb = buffer_info->skb;
		cleaned = true;

		if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
			adapter->stats.tx_aborted_errors++;
			netdev_err(adapter->netdev, "Transfer Abort Error\n");
		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)) {
			adapter->stats.tx_carrier_errors++;
			netdev_err(adapter->netdev,
				   "Transfer Carrier Sense Error\n");
		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)) {
			adapter->stats.tx_aborted_errors++;
			netdev_err(adapter->netdev,
				   "Transfer Collision Abort Error\n");
		} else if ((tx_desc->gbec_status &
			    (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
			     PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
			adapter->stats.collisions++;
			adapter->stats.tx_packets++;
			adapter->stats.tx_bytes += skb->len;
			netdev_dbg(adapter->netdev, "Transfer Collision\n");
		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)) {
			adapter->stats.tx_packets++;
			adapter->stats.tx_bytes += skb->len;
		}
		if (buffer_info->mapped) {
			netdev_dbg(adapter->netdev,
				   "unmap buffer_info->dma : %d\n", i);
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
			buffer_info->mapped = false;
		}
		if (buffer_info->skb) {
			netdev_dbg(adapter->netdev,
				   "trim buffer_info->skb : %d\n", i);
			skb_trim(buffer_info->skb, 0);
		}
		tx_desc->gbec_status = DSC_INIT16;
		if (unlikely(++i == tx_ring->count))
			i = 0;
		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);

		if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
			cleaned = false;
			break;
		}
	}
	netdev_dbg(adapter->netdev,
		   "called pch_gbe_unmap_and_free_tx_resource() %d count\n",
		   cleaned_count);
	if (cleaned_count > 0) {
		netif_tx_lock(adapter->netdev);
		if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
			netif_wake_queue(adapter->netdev);
			adapter->stats.tx_restart_count++;
			netdev_dbg(adapter->netdev, "Tx wake queue\n");
		}

		tx_ring->next_to_clean = i;

		netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
			   tx_ring->next_to_clean);
		netif_tx_unlock(adapter->netdev);
	}
	return cleaned;
}

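/**
 * pch_gbe_clean_rx - Send received data up the network stack
 * @adapter:     Board private structure
 * @rx_ring:     Rx descriptor ring
 * @work_done:   Completed count
 * @work_to_do:  Request count
 * Returns:
 *	true:  Cleaned the descriptor
 *	false: Not cleaned the descriptor
 */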
static bool
pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
		 struct pch_gbe_rx_ring *rx_ring,
		 int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_buffer *buffer_info;
	struct pch_gbe_rx_desc *rx_desc;
	u32 length;
	unsigned int i;
	unsigned int cleaned_count = 0;
	bool cleaned = false;
	struct sk_buff *skb;
	u8 dma_status;
	u16 gbec_status;
	u32 tcp_ip_status;

	i = rx_ring->next_to_clean;

	while (*work_done < work_to_do) {
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
		if (rx_desc->gbec_status == DSC_INIT16)
			break;
		cleaned = true;
		cleaned_count++;

		dma_status = rx_desc->dma_status;
		gbec_status = rx_desc->gbec_status;
		tcp_ip_status = rx_desc->tcp_ip_status;
		rx_desc->gbec_status = DSC_INIT16;
		buffer_info = &rx_ring->buffer_info[i];
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->mapped = false;

		netdev_dbg(netdev,
			   "RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x TCP:0x%08x]  BufInf = 0x%p\n",
			   i, dma_status, gbec_status, tcp_ip_status,
			   buffer_info);

		if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
			adapter->stats.rx_frame_errors++;
			netdev_err(netdev, "Receive Not Octal Error\n");
		} else if (unlikely(gbec_status &
				    PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
			adapter->stats.rx_frame_errors++;
			netdev_err(netdev, "Receive Nibble Error\n");
		} else if (unlikely(gbec_status &
				    PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
			adapter->stats.rx_crc_errors++;
			netdev_err(netdev, "Receive CRC Error\n");
		} else {
			length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
			if (rx_desc->rx_words_eob & 0x02)
				length = length - 4;

			memcpy(skb->data, buffer_info->rx_buffer, length);

			adapter->stats.rx_bytes += length;
			adapter->stats.rx_packets++;
			if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
				adapter->stats.multicast++;

			skb_put(skb, length);

			pch_rx_timestamp(adapter, skb);

			skb->protocol = eth_type_trans(skb, netdev);
			if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				skb->ip_summed = CHECKSUM_NONE;

			napi_gro_receive(&adapter->napi, skb);
			(*work_done)++;
			netdev_dbg(netdev,
				   "Receive skb->ip_summed: %d length: %d\n",
				   skb->ip_summed, length);
		}

		if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
			pch_gbe_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}
		if (++i == rx_ring->count)
			i = 0;
	}
	rx_ring->next_to_clean = i;
	if (cleaned_count)
		pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
	return cleaned;
}

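/**
 * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
 * @adapter:  Board private structure
 * @tx_ring:  Tx descriptor ring (for a specific queue) to setup
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */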
int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
			       struct pch_gbe_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_tx_desc *tx_desc;
	int size;
	int desNo;

	size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info)
		return -ENOMEM;

	tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		vfree(tx_ring->buffer_info);
		return -ENOMEM;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	for (desNo = 0; desNo < tx_ring->count; desNo++) {
		tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
		tx_desc->gbec_status = DSC_INIT16;
	}
	netdev_dbg(adapter->netdev,
		   "tx_ring->desc = 0x%p  tx_ring->dma = 0x%08llx  next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
		   tx_ring->desc, (unsigned long long)tx_ring->dma,
		   tx_ring->next_to_clean, tx_ring->next_to_use);
	return 0;
}

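/**
 * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
 * @adapter:  Board private structure
 * @rx_ring:  Rx descriptor ring (for a specific queue) to setup
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */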
int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
			       struct pch_gbe_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_rx_desc *rx_desc;
	int size;
	int desNo;

	size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		return -ENOMEM;

	rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc) {
		vfree(rx_ring->buffer_info);
		return -ENOMEM;
	}
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	for (desNo = 0; desNo < rx_ring->count; desNo++) {
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
		rx_desc->gbec_status = DSC_INIT16;
	}
	netdev_dbg(adapter->netdev,
		   "rx_ring->desc = 0x%p  rx_ring->dma = 0x%08llx  next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
		   rx_ring->desc, (unsigned long long)rx_ring->dma,
		   rx_ring->next_to_clean, rx_ring->next_to_use);
	return 0;
}

void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
			       struct pch_gbe_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	pch_gbe_clean_tx_ring(adapter, tx_ring);
	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;
	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);
	tx_ring->desc = NULL;
}

void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
			       struct pch_gbe_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	pch_gbe_clean_rx_ring(adapter, rx_ring);
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
	rx_ring->desc = NULL;
}

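/**
 * pch_gbe_request_irq - Allocate an interrupt line
 * @adapter:  Board private structure
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */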
static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	err = pci_alloc_irq_vectors(adapter->pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (err < 0)
		return err;

	adapter->irq = pci_irq_vector(adapter->pdev, 0);

	err = request_irq(adapter->irq, &pch_gbe_intr, IRQF_SHARED,
			  netdev->name, netdev);
	if (err)
		netdev_err(netdev, "Unable to allocate interrupt Error: %d\n",
			   err);
	netdev_dbg(netdev, "have_msi : %d  return : 0x%04x\n",
		   pci_dev_msi_enabled(adapter->pdev), err);
	return err;
}

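/**
 * pch_gbe_up - Up GbE network device
 * @adapter:  Board private structure
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */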
int pch_gbe_up(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
	int err = -EINVAL;

	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
		netdev_err(netdev, "Error: Invalid MAC address\n");
		goto out;
	}

	pch_gbe_set_multi(netdev);

	pch_gbe_setup_tctl(adapter);
	pch_gbe_configure_tx(adapter);
	pch_gbe_setup_rctl(adapter);
	pch_gbe_configure_rx(adapter);

	err = pch_gbe_request_irq(adapter);
	if (err) {
		netdev_err(netdev,
			   "Error: can't bring device up - irq request failed\n");
		goto out;
	}
	err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
	if (err) {
		netdev_err(netdev,
			   "Error: can't bring device up - alloc rx buffers pool failed\n");
		goto freeirq;
	}
	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
	adapter->tx_queue_len = netdev->tx_queue_len;
	pch_gbe_enable_dma_rx(&adapter->hw);
	pch_gbe_enable_mac_rx(&adapter->hw);

	mod_timer(&adapter->watchdog_timer, jiffies);

	napi_enable(&adapter->napi);
	pch_gbe_irq_enable(adapter);
	netif_start_queue(adapter->netdev);

	return 0;

freeirq:
	pch_gbe_free_irq(adapter);
out:
	return err;
}

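/**
 * pch_gbe_down - Down GbE network device
 * @adapter:  Board private structure
 */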
void pch_gbe_down(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;

	napi_disable(&adapter->napi);
	atomic_set(&adapter->irq_sem, 0);

	pch_gbe_irq_disable(adapter);
	pch_gbe_free_irq(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
		pch_gbe_reset(adapter);
	pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
	pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);

	dma_free_coherent(&adapter->pdev->dev, rx_ring->rx_buff_pool_size,
			  rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
	rx_ring->rx_buff_pool_logic = 0;
	rx_ring->rx_buff_pool_size = 0;
	rx_ring->rx_buff_pool = NULL;
}

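/**
 * pch_gbe_sw_init - Initialize general software structures
 * @adapter:  Board private structure to initialize
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */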
static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
	hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
	hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US;

	if (pch_gbe_alloc_queues(adapter)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}
	spin_lock_init(&adapter->hw.miim_lock);
	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->ethtool_lock);
	atomic_set(&adapter->irq_sem, 0);
	pch_gbe_irq_disable(adapter);

	pch_gbe_init_stats(adapter);

	netdev_dbg(netdev,
		   "rx_buffer_len : %d  mac.min_frame_size : %d  mac.max_frame_size : %d\n",
		   (u32) adapter->rx_buffer_len,
		   hw->mac.min_frame_size, hw->mac.max_frame_size);
	return 0;
}

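/**
 * pch_gbe_open - Called when a network interface is made active
 * @netdev:	Network interface device structure
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */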
static int pch_gbe_open(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	int err;

	err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;

	err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;
	pch_gbe_phy_power_up(hw);
	err = pch_gbe_up(adapter);
	if (err)
		goto err_up;
	netdev_dbg(netdev, "Success End\n");
	return 0;

err_up:
	if (!adapter->wake_up_evt)
		pch_gbe_phy_power_down(hw);
	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
err_setup_rx:
	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
err_setup_tx:
	pch_gbe_reset(adapter);
	netdev_err(netdev, "Error End\n");
	return err;
}

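/**
 * pch_gbe_stop - Disables a network interface
 * @netdev:  Network interface device structure
 * Returns:
 *	0: Successfully
 */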
static int pch_gbe_stop(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	pch_gbe_down(adapter);
	if (!adapter->wake_up_evt)
		pch_gbe_phy_power_down(hw);
	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
	return 0;
}

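/**
 * pch_gbe_xmit_frame - Packet transmitting start
 * @skb:     Socket buffer structure
 * @netdev:  Network interface device structure
 * Returns:
 *	- NETDEV_TX_OK:   Normal end
 *	- NETDEV_TX_BUSY: Error end
 */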
static netdev_tx_t pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;

	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
		netif_stop_queue(netdev);
		netdev_dbg(netdev,
			   "Return : BUSY  next_to use : 0x%08x  next_to clean : 0x%08x\n",
			   tx_ring->next_to_use, tx_ring->next_to_clean);
		return NETDEV_TX_BUSY;
	}

	pch_gbe_tx_queue(adapter, tx_ring, skb);
	return NETDEV_TX_OK;
}

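/**
 * pch_gbe_set_multi - Multicast and Promiscuous mode set
 * @netdev:   Network interface device structure
 */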
static void pch_gbe_set_multi(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u32 rctl, adrmask;
	int mc_count, i;

	netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags);

	rctl = ioread32(&hw->reg->RX_MODE);
	rctl |= PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN;

	if (netdev->flags & IFF_PROMISC)
		rctl &= ~(PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);

	mc_count = netdev_mc_count(netdev);
	if ((netdev->flags & IFF_ALLMULTI) || mc_count >= PCH_GBE_MAR_ENTRIES)
		rctl &= ~PCH_GBE_MLT_FIL_EN;

	iowrite32(rctl, &hw->reg->RX_MODE);

	if (!(rctl & PCH_GBE_MLT_FIL_EN))
		return;

	i = 1;
	netdev_for_each_mc_addr(ha, netdev)
		pch_gbe_mac_mar_set(hw, ha->addr, i++);

	for (; i < PCH_GBE_MAR_ENTRIES; i++) {
		adrmask = ioread32(&hw->reg->ADDR_MASK);
		iowrite32(adrmask | BIT(i), &hw->reg->ADDR_MASK);

		pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);

		iowrite32(0, &hw->reg->mac_adr[i].high);
		iowrite32(0, &hw->reg->mac_adr[i].low);
	}

	netdev_dbg(netdev,
		   "RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x  netdev->mc_count : 0x%08x\n",
		   ioread32(&hw->reg->RX_MODE), mc_count);
}

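/**
 * pch_gbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: Network interface device structure
 * @addr:   Pointer to an address structure
 * Returns:
 *	0:		Successfully
 *	-EADDRNOTAVAIL:	Failed
 */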
static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *skaddr = addr;
	int ret_val;

	if (!is_valid_ether_addr(skaddr->sa_data)) {
		ret_val = -EADDRNOTAVAIL;
	} else {
		memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
		memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
		pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
		ret_val = 0;
	}
	netdev_dbg(netdev, "ret_val : 0x%08x\n", ret_val);
	netdev_dbg(netdev, "dev_addr : %pM\n", netdev->dev_addr);
	netdev_dbg(netdev, "mac_addr : %pM\n", adapter->hw.mac.addr);
	netdev_dbg(netdev, "MAC_ADR1AB reg : 0x%08x 0x%08x\n",
		   ioread32(&adapter->hw.reg->mac_adr[0].high),
		   ioread32(&adapter->hw.reg->mac_adr[0].low));
	return ret_val;
}

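/**
 * pch_gbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev:   Network interface device structure
 * @new_mtu:  New value for maximum frame size
 * Returns:
 *	0:		Successfully
 *	Negative value:	Failed
 */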
2162static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2163{
2164 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2165 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2166 unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
2167 int err;
2168
2169 if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
2170 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
2171 else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
2172 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
2173 else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
2174 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
2175 else
2176 adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
2177
2178 if (netif_running(netdev)) {
2179 pch_gbe_down(adapter);
2180 err = pch_gbe_up(adapter);
2181 if (err) {
2182 adapter->rx_buffer_len = old_rx_buffer_len;
2183 pch_gbe_up(adapter);
2184 return err;
2185 } else {
2186 netdev->mtu = new_mtu;
2187 adapter->hw.mac.max_frame_size = max_frame;
2188 }
2189 } else {
2190 pch_gbe_reset(adapter);
2191 netdev->mtu = new_mtu;
2192 adapter->hw.mac.max_frame_size = max_frame;
2193 }
2194
2195 netdev_dbg(netdev,
2196 "max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
2197 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
2198 adapter->hw.mac.max_frame_size);
2199 return 0;
2200}

/**
 * pch_gbe_set_features - Reset device after features changed
 * @netdev:   Network interface device structure
 * @features: New features
 * Returns
 *	0:		HW state updated successfully
 */
static int pch_gbe_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	/* Only a change of the Rx checksum offload requires reprogramming
	 * the hardware.
	 */
	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	if (netif_running(netdev))
		pch_gbe_reinit_locked(adapter);
	else
		pch_gbe_reset(adapter);

	return 0;
}

/**
 * pch_gbe_ioctl - Controls register through a MII interface
 * @netdev:   Network interface device structure
 * @ifr:      Pointer to ifr structure
 * @cmd:      Control command
 * Returns
 *	0:	Successfully
 *	Negative value:	Failed
 */
static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "cmd : 0x%04x\n", cmd);

	/* Hardware timestamping requests go to the PTP-specific handler;
	 * everything else is served by the generic MII layer.
	 */
	if (cmd == SIOCSHWTSTAMP)
		return hwtstamp_ioctl(netdev, ifr, cmd);

	return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
}
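
/* For reference, userspace arms hardware timestamping through the standard
 * SIOCSHWTSTAMP request. A minimal sketch (assuming an interface named
 * "eth0", an open socket sock_fd, and a filter this driver accepts; error
 * handling omitted):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */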

/**
 * pch_gbe_tx_timeout - Respond to a Tx Hang
 * @netdev:   Network interface device structure
 * @txqueue:  Index of the hanging queue
 */
static void pch_gbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->stats.tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

/**
 * pch_gbe_napi_poll - NAPI receive and transmit polling callback
 * @napi:    Pointer of polling device struct
 * @budget:  The maximum number of packets to process in one poll
 * Returns the number of packets processed, at most @budget
 */
static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
{
	struct pch_gbe_adapter *adapter =
		container_of(napi, struct pch_gbe_adapter, napi);
	int work_done = 0;
	bool poll_end_flag = false;
	bool cleaned = false;

	netdev_dbg(adapter->netdev, "budget : %d\n", budget);

	pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
	cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);

	/* If any Tx descriptors were cleaned, stay in polling mode by
	 * reporting the full budget as consumed.
	 */
	if (cleaned)
		work_done = budget;

	/* If no Tx and not enough Rx work was done, exit polling mode */
	if (work_done < budget)
		poll_end_flag = true;

	if (poll_end_flag) {
		napi_complete_done(napi, work_done);
		pch_gbe_irq_enable(adapter);
	}

	/* Restart Rx DMA if it was stopped after an Rx FIFO overrun */
	if (adapter->rx_stop_flag) {
		adapter->rx_stop_flag = false;
		pch_gbe_enable_dma_rx(&adapter->hw);
	}

	netdev_dbg(adapter->netdev,
		   "poll_end_flag : %d  work_done : %d  budget : %d\n",
		   poll_end_flag, work_done, budget);

	return work_done;
}
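
/* NAPI contract note: returning a value equal to @budget keeps this poll
 * instance scheduled; only when fewer packets than @budget are reported
 * (and napi_complete_done() has been called above) are the device
 * interrupts re-enabled via pch_gbe_irq_enable().
 */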

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * pch_gbe_netpoll - Polling 'interrupt' handler
 * @netdev:  Network interface device structure
 *
 * Used by things like netconsole to send skbs without having to
 * re-enable interrupts. It's not called while the interrupt routine
 * is executing.
 */
static void pch_gbe_netpoll(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->irq);
	pch_gbe_intr(adapter->irq, netdev);
	enable_irq(adapter->irq);
}
#endif

static const struct net_device_ops pch_gbe_netdev_ops = {
	.ndo_open = pch_gbe_open,
	.ndo_stop = pch_gbe_stop,
	.ndo_start_xmit = pch_gbe_xmit_frame,
	.ndo_set_mac_address = pch_gbe_set_mac,
	.ndo_tx_timeout = pch_gbe_tx_timeout,
	.ndo_change_mtu = pch_gbe_change_mtu,
	.ndo_set_features = pch_gbe_set_features,
	.ndo_eth_ioctl = pch_gbe_ioctl,
	.ndo_set_rx_mode = pch_gbe_set_multi,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = pch_gbe_netpoll,
#endif
};

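/* PCI error recovery flow: error_detected() quiesces the device when the
 * PCI channel fails, slot_reset() re-enables and re-initializes it after
 * the link has been reset, and io_resume() brings the interface back up.
 */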
static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);
	if (netif_running(netdev))
		pch_gbe_down(adapter);
	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);
	pch_gbe_phy_power_up(hw);
	pch_gbe_reset(adapter);

	pch_gbe_mac_set_wol_event(hw, 0);

	return PCI_ERS_RESULT_RECOVERED;
}

static void pch_gbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (pch_gbe_up(adapter)) {
			netdev_dbg(netdev,
				   "can't bring device back up after reset\n");
			return;
		}
	}
	netif_device_attach(netdev);
}

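/* Common suspend path: when wake-on-LAN events are armed, the receiver is
 * left configured so the MAC can still recognize wake packets; otherwise
 * the PHY is powered down to save energy.
 */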
static int __pch_gbe_suspend(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 wufc = adapter->wake_up_evt;

	netif_device_detach(netdev);
	if (netif_running(netdev))
		pch_gbe_down(adapter);
	if (wufc) {
		pch_gbe_set_multi(netdev);
		pch_gbe_setup_rctl(adapter);
		pch_gbe_configure_rx(adapter);
		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
				       hw->mac.link_duplex);
		pch_gbe_set_mode(adapter, hw->mac.link_speed,
				 hw->mac.link_duplex);
		pch_gbe_mac_set_wol_event(hw, wufc);
		pci_disable_device(pdev);
	} else {
		pch_gbe_phy_power_down(hw);
		pch_gbe_mac_set_wol_event(hw, wufc);
		pci_disable_device(pdev);
	}
	return 0;
}

#ifdef CONFIG_PM
static int pch_gbe_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	return __pch_gbe_suspend(pdev);
}

static int pch_gbe_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		netdev_err(netdev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);
	pch_gbe_phy_power_up(hw);
	pch_gbe_reset(adapter);

	/* Clear any pending wake-on-LAN events */
	pch_gbe_mac_set_wol_event(hw, 0);

	if (netif_running(netdev))
		pch_gbe_up(adapter);
	netif_device_attach(netdev);

	return 0;
}
#endif

static void pch_gbe_shutdown(struct pci_dev *pdev)
{
	__pch_gbe_suspend(pdev);
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

static void pch_gbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	cancel_work_sync(&adapter->reset_task);
	unregister_netdev(netdev);

	pch_gbe_phy_hw_reset(&adapter->hw);

	free_netdev(netdev);
}

static int pch_gbe_probe(struct pci_dev *pdev,
			 const struct pci_device_id *pci_id)
{
	struct net_device *netdev;
	struct pch_gbe_adapter *adapter;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev, "ERR: No usable DMA configuration, aborting\n");
			return ret;
		}
	}

	ret = pcim_iomap_regions(pdev, 1 << PCH_GBE_PCI_BAR, pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev,
			"ERR: Can't reserve PCI I/O and memory resources\n");
		return ret;
	}
	pci_set_master(pdev);

	netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
	if (!netdev)
		return -ENOMEM;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR];

	adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data;
	if (adapter->pdata && adapter->pdata->platform_init) {
		ret = adapter->pdata->platform_init(pdev);
		if (ret)
			goto err_free_netdev;
	}

	/* The IEEE 1588 timestamping hardware is a separate PCI function
	 * (device 12, function 4) on the same bus as the MAC.
	 */
	adapter->ptp_pdev =
		pci_get_domain_bus_and_slot(pci_domain_nr(adapter->pdev->bus),
					    adapter->pdev->bus->number,
					    PCI_DEVFN(12, 4));

	netdev->netdev_ops = &pch_gbe_netdev_ops;
	netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
	netif_napi_add(netdev, &adapter->napi,
		       pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
	netdev->hw_features = NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	netdev->features = netdev->hw_features;
	pch_gbe_set_ethtool_ops(netdev);

	/* MTU range: 46 - 10300 */
	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
	netdev->max_mtu = PCH_GBE_MAX_JUMBO_FRAME_SIZE -
			  (ETH_HLEN + ETH_FCS_LEN);

	pch_gbe_mac_load_mac_addr(&adapter->hw);
	pch_gbe_mac_reset_hw(&adapter->hw);

	/* setup the private structure */
	ret = pch_gbe_sw_init(adapter);
	if (ret)
		goto err_free_netdev;

	/* Initialize PHY */
	ret = pch_gbe_init_phy(adapter);
	if (ret) {
		dev_err(&pdev->dev, "PHY initialize error\n");
		goto err_free_adapter;
	}

	/* Read the MAC address and store it in the private data */
	ret = pch_gbe_mac_read_mac_addr(&adapter->hw);
	if (ret) {
		dev_err(&pdev->dev, "MAC address Read Error\n");
		goto err_free_adapter;
	}

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		/*
		 * If the MAC is invalid (or just missing), display a warning
		 * but do not abort setting up the device. pch_gbe_up will
		 * prevent the interface from being brought up until a valid
		 * MAC is set.
		 */
		dev_err(&pdev->dev, "Invalid MAC address, interface disabled.\n");
	}
	timer_setup(&adapter->watchdog_timer, pch_gbe_watchdog, 0);

	INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);

	pch_gbe_check_options(adapter);

	/* initialize the wol settings based on the wol settings in eeprom */
	adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
	dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);

	/* reset the hardware with the new settings */
	pch_gbe_reset(adapter);

	ret = register_netdev(netdev);
	if (ret)
		goto err_free_adapter;

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	dev_dbg(&pdev->dev, "PCH Network Connection\n");

	/* Disable hibernation on certain platforms */
	if (adapter->pdata && adapter->pdata->phy_disable_hibernate)
		pch_gbe_phy_disable_hibernate(&adapter->hw);

	device_set_wakeup_enable(&pdev->dev, 1);
	return 0;

err_free_adapter:
	pch_gbe_phy_hw_reset(&adapter->hw);
err_free_netdev:
	free_netdev(netdev);
	return ret;
}

static void pch_gbe_gpio_remove_table(void *table)
{
	gpiod_remove_lookup_table(table);
}

static int pch_gbe_gpio_add_table(struct device *dev, void *table)
{
	gpiod_add_lookup_table(table);
	return devm_add_action_or_reset(dev, pch_gbe_gpio_remove_table, table);
}

static struct gpiod_lookup_table pch_gbe_minnow_gpio_table = {
	.dev_id		= "0000:02:00.1",
	.table		= {
		GPIO_LOOKUP("sch_gpio.33158", 13, NULL, GPIO_ACTIVE_LOW),
		{}
	},
};

/* On the MinnowBoard the PHY reset line is an active-low GPIO from the
 * SCH GPIO block; assert it briefly to bring the PHY into a known state
 * before the MAC is probed.
 */
static int pch_gbe_minnow_platform_init(struct pci_dev *pdev)
{
	struct gpio_desc *gpiod;
	int ret;

	ret = pch_gbe_gpio_add_table(&pdev->dev, &pch_gbe_minnow_gpio_table);
	if (ret)
		return ret;

	gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_OUT_HIGH);
	if (IS_ERR(gpiod))
		return dev_err_probe(&pdev->dev, PTR_ERR(gpiod),
				     "Can't request PHY reset GPIO line\n");

	/* Pulse the reset line: assert, hold, release, then let the PHY
	 * settle before continuing.
	 */
	gpiod_set_value(gpiod, 1);
	usleep_range(1250, 1500);
	gpiod_set_value(gpiod, 0);
	usleep_range(1250, 1500);

	return ret;
}

static struct pch_gbe_privdata pch_gbe_minnow_privdata = {
	.phy_tx_clk_delay = true,
	.phy_disable_hibernate = true,
	.platform_init = pch_gbe_minnow_platform_init,
};

static const struct pci_device_id pch_gbe_pcidev_id[] = {
	{.vendor = PCI_VENDOR_ID_INTEL,
	 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
	 .subvendor = PCI_VENDOR_ID_CIRCUITCO,
	 .subdevice = PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00),
	 .driver_data = (kernel_ulong_t)&pch_gbe_minnow_privdata
	 },
	{.vendor = PCI_VENDOR_ID_INTEL,
	 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	{.vendor = PCI_VENDOR_ID_ROHM,
	 .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	{.vendor = PCI_VENDOR_ID_ROHM,
	 .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },

	{0}
};

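/* One suspend/resume pair services every PM transition (suspend/resume,
 * hibernation freeze/thaw and poweroff/restore); the WoL configuration in
 * __pch_gbe_suspend() decides whether the PHY stays powered.
 */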
#ifdef CONFIG_PM
static const struct dev_pm_ops pch_gbe_pm_ops = {
	.suspend = pch_gbe_suspend,
	.resume = pch_gbe_resume,
	.freeze = pch_gbe_suspend,
	.thaw = pch_gbe_resume,
	.poweroff = pch_gbe_suspend,
	.restore = pch_gbe_resume,
};
#endif

static const struct pci_error_handlers pch_gbe_err_handler = {
	.error_detected = pch_gbe_io_error_detected,
	.slot_reset = pch_gbe_io_slot_reset,
	.resume = pch_gbe_io_resume
};

static struct pci_driver pch_gbe_driver = {
	.name = KBUILD_MODNAME,
	.id_table = pch_gbe_pcidev_id,
	.probe = pch_gbe_probe,
	.remove = pch_gbe_remove,
#ifdef CONFIG_PM
	.driver.pm = &pch_gbe_pm_ops,
#endif
	.shutdown = pch_gbe_shutdown,
	.err_handler = &pch_gbe_err_handler
};
module_pci_driver(pch_gbe_driver);

MODULE_DESCRIPTION("EG20T PCH Gigabit Ethernet Driver");
MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);