/* r8169.c: RealTek 8169/8168/8101 ethernet driver. */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>

#include "r8169.h"
#include "r8169_firmware.h"

#define MODULENAME "r8169"

#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
#define FIRMWARE_8411_2 "rtl_nic/rtl8411-2.fw"
#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw"
#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw"
#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw"
#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw"
#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw"
#define FIRMWARE_8168FP_3 "rtl_nic/rtl8168fp-3.fw"
#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw"
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"

#define R8169_MSG_DEFAULT \
	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)

/* Maximal number of multicast addresses programmed into the hardware
   filter before the driver falls back to accept-all-multicast. */
#define MC_FILTER_LIMIT 32

#define TX_DMA_BURST 7		/* Maximal PHY DMA burst, '7' means no limit */
#define InterFrameGap 0x03	/* 3 means InterFrameGap = the shortest one */

#define R8169_REGS_SIZE 256
#define R8169_RX_BUF_SIZE (SZ_16K - 1)
#define NUM_TX_DESC 64		/* Number of Tx descriptor registers */
#define NUM_RX_DESC 256U	/* Number of Rx descriptor registers */
#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))

#define RTL_CFG_NO_GBIT 1

/* write/read MMIO register */
#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg))
#define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg))
#define RTL_W32(tp, reg, val32) writel((val32), tp->mmio_addr + (reg))
#define RTL_R8(tp, reg) readb(tp->mmio_addr + (reg))
#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg))
#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg))

#define JUMBO_4K (4*1024 - ETH_HLEN - 2)
#define JUMBO_6K (6*1024 - ETH_HLEN - 2)
#define JUMBO_7K (7*1024 - ETH_HLEN - 2)
#define JUMBO_9K (9*1024 - ETH_HLEN - 2)

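/*
 * Illustrative only: the RTL_Wx()/RTL_Rx() helpers above are thin wrappers
 * around MMIO accessors.  A typical sequence, as used further down in this
 * file when touching the Config registers, looks like:
 *
 *	RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
 *	RTL_W8(tp, Config2, RTL_R8(tp, Config2) | PME_SIGNAL);
 *	RTL_W8(tp, Cfg9346, Cfg9346_Lock);
 */
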
93static const struct {
94 const char *name;
95 const char *fw_name;
96} rtl_chip_infos[] = {
97
98 [RTL_GIGA_MAC_VER_02] = {"RTL8169s" },
99 [RTL_GIGA_MAC_VER_03] = {"RTL8110s" },
100 [RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" },
101 [RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc" },
102 [RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc" },
103
104 [RTL_GIGA_MAC_VER_07] = {"RTL8102e" },
105 [RTL_GIGA_MAC_VER_08] = {"RTL8102e" },
106 [RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" },
107 [RTL_GIGA_MAC_VER_10] = {"RTL8101e" },
108 [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" },
109 [RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b" },
110 [RTL_GIGA_MAC_VER_13] = {"RTL8101e" },
111 [RTL_GIGA_MAC_VER_14] = {"RTL8100e" },
112 [RTL_GIGA_MAC_VER_15] = {"RTL8100e" },
113 [RTL_GIGA_MAC_VER_16] = {"RTL8101e" },
114 [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" },
115 [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" },
116 [RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" },
117 [RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c" },
118 [RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c" },
119 [RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c" },
120 [RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp" },
121 [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" },
122 [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1},
123 [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2},
124 [RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp" },
125 [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" },
126 [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1},
127 [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1},
128 [RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp" },
129 [RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e", FIRMWARE_8168E_1},
130 [RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e", FIRMWARE_8168E_2},
131 [RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3},
132 [RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f", FIRMWARE_8168F_1},
133 [RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f", FIRMWARE_8168F_2},
134 [RTL_GIGA_MAC_VER_37] = {"RTL8402", FIRMWARE_8402_1 },
135 [RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 },
136 [RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1},
137 [RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2},
138 [RTL_GIGA_MAC_VER_41] = {"RTL8168g/8111g" },
139 [RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu", FIRMWARE_8168G_3},
140 [RTL_GIGA_MAC_VER_43] = {"RTL8106eus", FIRMWARE_8106E_2},
141 [RTL_GIGA_MAC_VER_44] = {"RTL8411b", FIRMWARE_8411_2 },
142 [RTL_GIGA_MAC_VER_45] = {"RTL8168h/8111h", FIRMWARE_8168H_1},
143 [RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2},
144 [RTL_GIGA_MAC_VER_47] = {"RTL8107e", FIRMWARE_8107E_1},
145 [RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2},
146 [RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" },
147 [RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" },
148 [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" },
149 [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3},
150 [RTL_GIGA_MAC_VER_60] = {"RTL8125" },
151 [RTL_GIGA_MAC_VER_61] = {"RTL8125", FIRMWARE_8125A_3},
152};
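
/*
 * rtl_chip_infos is indexed by enum mac_version; entries that name no
 * firmware leave fw_name NULL, meaning no firmware patch is loaded for that
 * chip.  The probe path picks the entry for the detected chip along the
 * lines of (sketch, not the literal probe code):
 *
 *	tp->fw_name = rtl_chip_infos[chipset].fw_name;
 */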
153
154static const struct pci_device_id rtl8169_pci_tbl[] = {
155 { PCI_VDEVICE(REALTEK, 0x2502) },
156 { PCI_VDEVICE(REALTEK, 0x2600) },
157 { PCI_VDEVICE(REALTEK, 0x8129) },
158 { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_NO_GBIT },
159 { PCI_VDEVICE(REALTEK, 0x8161) },
160 { PCI_VDEVICE(REALTEK, 0x8167) },
161 { PCI_VDEVICE(REALTEK, 0x8168) },
162 { PCI_VDEVICE(NCUBE, 0x8168) },
163 { PCI_VDEVICE(REALTEK, 0x8169) },
164 { PCI_VENDOR_ID_DLINK, 0x4300,
165 PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0 },
166 { PCI_VDEVICE(DLINK, 0x4300) },
167 { PCI_VDEVICE(DLINK, 0x4302) },
168 { PCI_VDEVICE(AT, 0xc107) },
169 { PCI_VDEVICE(USR, 0x0116) },
170 { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 },
171 { 0x0001, 0x8168, PCI_ANY_ID, 0x2410 },
172 { PCI_VDEVICE(REALTEK, 0x8125) },
173 { PCI_VDEVICE(REALTEK, 0x3000) },
174 {}
175};
176
177MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
178
179static struct {
180 u32 msg_enable;
181} debug = { -1 };
182
183enum rtl_registers {
184 MAC0 = 0,
185 MAC4 = 4,
186 MAR0 = 8,
187 CounterAddrLow = 0x10,
188 CounterAddrHigh = 0x14,
189 TxDescStartAddrLow = 0x20,
190 TxDescStartAddrHigh = 0x24,
191 TxHDescStartAddrLow = 0x28,
192 TxHDescStartAddrHigh = 0x2c,
193 FLASH = 0x30,
194 ERSR = 0x36,
195 ChipCmd = 0x37,
196 TxPoll = 0x38,
197 IntrMask = 0x3c,
198 IntrStatus = 0x3e,
199
200 TxConfig = 0x40,
201#define TXCFG_AUTO_FIFO (1 << 7)
202#define TXCFG_EMPTY (1 << 11)
203
204 RxConfig = 0x44,
205#define RX128_INT_EN (1 << 15)
206#define RX_MULTI_EN (1 << 14)
207#define RXCFG_FIFO_SHIFT 13
208
209#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
210#define RX_EARLY_OFF (1 << 11)
211#define RXCFG_DMA_SHIFT 8
212
213#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
214
215 Cfg9346 = 0x50,
216 Config0 = 0x51,
217 Config1 = 0x52,
218 Config2 = 0x53,
219#define PME_SIGNAL (1 << 5)
220
221 Config3 = 0x54,
222 Config4 = 0x55,
223 Config5 = 0x56,
224 PHYAR = 0x60,
225 PHYstatus = 0x6c,
226 RxMaxSize = 0xda,
227 CPlusCmd = 0xe0,
228 IntrMitigate = 0xe2,
229
230#define RTL_COALESCE_MASK 0x0f
231#define RTL_COALESCE_SHIFT 4
232#define RTL_COALESCE_T_MAX (RTL_COALESCE_MASK)
233#define RTL_COALESCE_FRAME_MAX (RTL_COALESCE_MASK << 2)
234
235 RxDescAddrLow = 0xe4,
236 RxDescAddrHigh = 0xe8,
237 EarlyTxThres = 0xec,
238
239#define NoEarlyTx 0x3f
240
241 MaxTxPacketSize = 0xec,
242
243#define TxPacketMax (8064 >> 7)
244#define EarlySize 0x27
245
246 FuncEvent = 0xf0,
247 FuncEventMask = 0xf4,
248 FuncPresetState = 0xf8,
249 IBCR0 = 0xf8,
250 IBCR2 = 0xf9,
251 IBIMR0 = 0xfa,
252 IBISR0 = 0xfb,
253 FuncForceEvent = 0xfc,
254};
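
/*
 * Sketch (not the literal init code): RxConfig is typically programmed by
 * OR-ing the FIFO/DMA burst fields defined above with accept bits covered
 * by RX_CONFIG_ACCEPT_MASK, e.g. an older chip could be brought up with
 *
 *	RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
 *
 * before the rx filter adds AcceptBroadcast/AcceptMyPhys and friends.  The
 * exact value is chip dependent; see the init and rx-filter code later in
 * the driver (outside this excerpt).
 */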
255
256enum rtl8168_8101_registers {
257 CSIDR = 0x64,
258 CSIAR = 0x68,
259#define CSIAR_FLAG 0x80000000
260#define CSIAR_WRITE_CMD 0x80000000
261#define CSIAR_BYTE_ENABLE 0x0000f000
262#define CSIAR_ADDR_MASK 0x00000fff
263 PMCH = 0x6f,
264 EPHYAR = 0x80,
265#define EPHYAR_FLAG 0x80000000
266#define EPHYAR_WRITE_CMD 0x80000000
267#define EPHYAR_REG_MASK 0x1f
268#define EPHYAR_REG_SHIFT 16
269#define EPHYAR_DATA_MASK 0xffff
270 DLLPR = 0xd0,
271#define PFM_EN (1 << 6)
272#define TX_10M_PS_EN (1 << 7)
273 DBG_REG = 0xd1,
274#define FIX_NAK_1 (1 << 4)
275#define FIX_NAK_2 (1 << 3)
276 TWSI = 0xd2,
277 MCU = 0xd3,
278#define NOW_IS_OOB (1 << 7)
279#define TX_EMPTY (1 << 5)
280#define RX_EMPTY (1 << 4)
281#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
282#define EN_NDP (1 << 3)
283#define EN_OOB_RESET (1 << 2)
284#define LINK_LIST_RDY (1 << 1)
285 EFUSEAR = 0xdc,
286#define EFUSEAR_FLAG 0x80000000
287#define EFUSEAR_WRITE_CMD 0x80000000
288#define EFUSEAR_READ_CMD 0x00000000
289#define EFUSEAR_REG_MASK 0x03ff
290#define EFUSEAR_REG_SHIFT 8
291#define EFUSEAR_DATA_MASK 0xff
292 MISC_1 = 0xf2,
293#define PFM_D3COLD_EN (1 << 6)
294};
295
296enum rtl8168_registers {
297 LED_FREQ = 0x1a,
298 EEE_LED = 0x1b,
299 ERIDR = 0x70,
300 ERIAR = 0x74,
301#define ERIAR_FLAG 0x80000000
302#define ERIAR_WRITE_CMD 0x80000000
303#define ERIAR_READ_CMD 0x00000000
304#define ERIAR_ADDR_BYTE_ALIGN 4
305#define ERIAR_TYPE_SHIFT 16
306#define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
307#define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
308#define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
309#define ERIAR_OOB (0x02 << ERIAR_TYPE_SHIFT)
310#define ERIAR_MASK_SHIFT 12
311#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
312#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
313#define ERIAR_MASK_0100 (0x4 << ERIAR_MASK_SHIFT)
314#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
315#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
316 EPHY_RXER_NUM = 0x7c,
317 OCPDR = 0xb0,
318#define OCPDR_WRITE_CMD 0x80000000
319#define OCPDR_READ_CMD 0x00000000
320#define OCPDR_REG_MASK 0x7f
321#define OCPDR_GPHY_REG_SHIFT 16
322#define OCPDR_DATA_MASK 0xffff
323 OCPAR = 0xb4,
324#define OCPAR_FLAG 0x80000000
325#define OCPAR_GPHY_WRITE_CMD 0x8000f060
326#define OCPAR_GPHY_READ_CMD 0x0000f060
327 GPHY_OCP = 0xb8,
328 RDSAR1 = 0xd0,
329 MISC = 0xf0,
330#define TXPLA_RST (1 << 29)
331#define DISABLE_LAN_EN (1 << 23)
332#define PWM_EN (1 << 22)
333#define RXDV_GATED_EN (1 << 19)
334#define EARLY_TALLY_EN (1 << 16)
335};
336
337enum rtl8125_registers {
338 IntrMask_8125 = 0x38,
339 IntrStatus_8125 = 0x3c,
340 TxPoll_8125 = 0x90,
341 MAC0_BKP = 0x19e0,
342};
343
344#define RX_VLAN_INNER_8125 BIT(22)
345#define RX_VLAN_OUTER_8125 BIT(23)
346#define RX_VLAN_8125 (RX_VLAN_INNER_8125 | RX_VLAN_OUTER_8125)
347
348#define RX_FETCH_DFLT_8125 (8 << 27)
349
enum rtl_register_content {
	/* InterruptStatusBits */
	SYSErr = 0x8000,
	PCSTimeout = 0x4000,
	SWInt = 0x0100,
	TxDescUnavail = 0x0080,
	RxFIFOOver = 0x0040,
	LinkChg = 0x0020,
	RxOverflow = 0x0010,
	TxErr = 0x0008,
	TxOK = 0x0004,
	RxErr = 0x0002,
	RxOK = 0x0001,

	/* RxStatusDesc */
	RxRWT = (1 << 22),
	RxRES = (1 << 21),
	RxRUNT = (1 << 20),
	RxCRC = (1 << 19),

	/* ChipCmdBits */
	StopReq = 0x80,
	CmdReset = 0x10,
	CmdRxEnb = 0x08,
	CmdTxEnb = 0x04,
	RxBufEmpty = 0x01,

	/* TxPoll register bits */
	HPQ = 0x80,		/* Poll cmd on the high prio queue */
	NPQ = 0x40,		/* Poll cmd on the low prio queue */
	FSWInt = 0x01,		/* Forced software interrupt */

	/* Cfg9346Bits */
	Cfg9346_Lock = 0x00,
	Cfg9346_Unlock = 0xc0,

	/* rx_mode_bits (RxConfig) */
	AcceptErr = 0x20,
	AcceptRunt = 0x10,
	AcceptBroadcast = 0x08,
	AcceptMulticast = 0x04,
	AcceptMyPhys = 0x02,
	AcceptAllPhys = 0x01,
#define RX_CONFIG_ACCEPT_MASK 0x3f

	/* TxConfigBits */
	TxInterFrameGapShift = 24,
	TxDMAShift = 8,		/* DMA burst value (0-7) is shifted this many bits */

	/* Config1 register */
	LEDS1 = (1 << 7),
	LEDS0 = (1 << 6),
	Speed_down = (1 << 4),
	MEMMAP = (1 << 3),
	IOMAP = (1 << 2),
	VPD = (1 << 1),
	PMEnable = (1 << 0),	/* Power Management Enable */

	/* Config2 register */
	ClkReqEn = (1 << 7),	/* Clock Request Enable */
	MSIEnable = (1 << 5),	/* 8169 only */
	PCI_Clock_66MHz = 0x01,
	PCI_Clock_33MHz = 0x00,

	/* Config3 register */
	MagicPacket = (1 << 5),	/* Wake up when receives a Magic Packet */
	LinkUp = (1 << 4),	/* Wake up when the cable connection is re-established */
	Jumbo_En0 = (1 << 2),	/* 8168 only */
	Rdy_to_L23 = (1 << 1),	/* L23 Enable */
	Beacon_en = (1 << 0),	/* 8168 only */

	/* Config4 register */
	Jumbo_En1 = (1 << 1),	/* 8168 only */

	/* Config5 register */
	BWF = (1 << 6),		/* Accept Broadcast wakeup frame */
	MWF = (1 << 5),		/* Accept Multicast wakeup frame */
	UWF = (1 << 4),		/* Accept Unicast wakeup frame */
	Spi_en = (1 << 3),
	LanWake = (1 << 1),	/* LanWake enable/disable */
	PMEStatus = (1 << 0),	/* PME status can be reset by PCI RST# */
	ASPM_en = (1 << 0),	/* ASPM enable */

	/* CPlusCmd register */
	EnableBist = (1 << 15),
	Mac_dbgo_oe = (1 << 14),
	EnAnaPLL = (1 << 14),
	Normal_mode = (1 << 13),
	Force_half_dup = (1 << 12),
	Force_rxflow_en = (1 << 11),
	Force_txflow_en = (1 << 10),
	Cxpl_dbg_sel = (1 << 9),
	ASF = (1 << 8),
	PktCntrDisable = (1 << 7),
	Mac_dbgo_sel = 0x001c,
	RxVlan = (1 << 6),
	RxChkSum = (1 << 5),
	PCIDAC = (1 << 4),
	PCIMulRW = (1 << 3),
#define INTT_MASK GENMASK(1, 0)
#define CPCMD_MASK (Normal_mode | RxVlan | RxChkSum | INTT_MASK)

	/* PHYstatus register */
	TBI_Enable = 0x80,
	TxFlowCtrl = 0x40,
	RxFlowCtrl = 0x20,
	_1000bpsF = 0x10,
	_100bps = 0x08,
	_10bps = 0x04,
	LinkStatus = 0x02,
	FullDup = 0x01,

	/* ResetCounterCommand */
	CounterReset = 0x1,

	/* DumpCounterCommand */
	CounterDump = 0x8,

	/* magic enable v2 */
	MagicPacket_v2 = (1 << 16),	/* Wake up when receives a Magic Packet */
};
471
enum rtl_desc_bit {
	/* First doubleword. */
	DescOwn = (1 << 31),	/* Descriptor is owned by NIC */
	RingEnd = (1 << 30),	/* End of descriptor ring */
	FirstFrag = (1 << 29),	/* First segment of a packet */
	LastFrag = (1 << 28),	/* Final segment of a packet */
};

/* Generic case. */
enum rtl_tx_desc_bit {
	/* First doubleword. */
	TD_LSO = (1 << 27),	/* Large Send Offload */
#define TD_MSS_MAX 0x07ffu	/* MSS value */

	/* Second doubleword. */
	TxVlanTag = (1 << 17),	/* Add VLAN tag */
};

/* 8169, 8168b */
enum rtl_tx_desc_bit_0 {
	/* First doubleword. */
#define TD0_MSS_SHIFT 16	/* MSS position (11 bits) */
	TD0_TCP_CS = (1 << 16),	/* Calculate TCP/IP checksum */
	TD0_UDP_CS = (1 << 17),	/* Calculate UDP/IP checksum */
	TD0_IP_CS = (1 << 18),	/* Calculate IP checksum */
};

/* 8102e, 8168c and beyond. */
enum rtl_tx_desc_bit_1 {
	/* First doubleword. */
	TD1_GTSENV4 = (1 << 26),	/* Giant Send for IPv4 */
	TD1_GTSENV6 = (1 << 25),	/* Giant Send for IPv6 */
#define GTTCPHO_SHIFT 18
#define GTTCPHO_MAX 0x7f

	/* Second doubleword. */
#define TCPHO_SHIFT 18
#define TCPHO_MAX 0x3ff
#define TD1_MSS_SHIFT 18	/* MSS position (11 bits) */
	TD1_IPv6_CS = (1 << 28),	/* Calculate IPv6 checksum */
	TD1_IPv4_CS = (1 << 29),	/* Calculate IPv4 checksum */
	TD1_TCP_CS = (1 << 30),	/* Calculate TCP/IP checksum */
	TD1_UDP_CS = (1 << 31),	/* Calculate UDP/IP checksum */
};
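
/*
 * Worked example (illustrative, not the literal xmit path): with this newer
 * layout a TCP/IPv4 checksum-offload request sets, in the second doubleword,
 * something like
 *
 *	opts2 |= TD1_IPv4_CS | TD1_TCP_CS |
 *		 (skb_transport_offset(skb) << TCPHO_SHIFT);
 *
 * whereas LSO puts TD1_GTSENV4 and the transport header offset
 * (GTTCPHO_SHIFT) into the first doubleword and the MSS into the second one
 * at TD1_MSS_SHIFT.
 */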

enum rtl_rx_desc_bit {
	/* Rx private */
	PID1 = (1 << 18),	/* Protocol ID bit 1/2 */
	PID0 = (1 << 17),	/* Protocol ID bit 0/2 */

#define RxProtoUDP (PID1)
#define RxProtoTCP (PID0)
#define RxProtoIP (PID1 | PID0)
#define RxProtoMask RxProtoIP

	IPFail = (1 << 16),	/* IP checksum failed */
	UDPFail = (1 << 15),	/* UDP/IP checksum failed */
	TCPFail = (1 << 14),	/* TCP/IP checksum failed */
	RxVlanTag = (1 << 16),	/* VLAN tag available (opts2) */
};
532
533#define RsvdMask 0x3fffc000
534
535#define RTL_GSO_MAX_SIZE_V1 32000
536#define RTL_GSO_MAX_SEGS_V1 24
537#define RTL_GSO_MAX_SIZE_V2 64000
538#define RTL_GSO_MAX_SEGS_V2 64
539
540struct TxDesc {
541 __le32 opts1;
542 __le32 opts2;
543 __le64 addr;
544};
545
546struct RxDesc {
547 __le32 opts1;
548 __le32 opts2;
549 __le64 addr;
550};
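
/*
 * Both descriptor layouts are 16 bytes (two 32-bit option words plus a
 * 64-bit DMA address), so with the ring sizes defined earlier:
 *	R8169_TX_RING_BYTES = 64  * 16 = 1024 bytes
 *	R8169_RX_RING_BYTES = 256 * 16 = 4096 bytes
 */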
551
552struct ring_info {
553 struct sk_buff *skb;
554 u32 len;
555};
556
557struct rtl8169_counters {
558 __le64 tx_packets;
559 __le64 rx_packets;
560 __le64 tx_errors;
561 __le32 rx_errors;
562 __le16 rx_missed;
563 __le16 align_errors;
564 __le32 tx_one_collision;
565 __le32 tx_multi_collision;
566 __le64 rx_unicast;
567 __le64 rx_broadcast;
568 __le32 rx_multicast;
569 __le16 tx_aborted;
570 __le16 tx_underun;
571};
572
573struct rtl8169_tc_offsets {
574 bool inited;
575 __le64 tx_errors;
576 __le32 tx_multi_collision;
577 __le16 tx_aborted;
578 __le16 rx_missed;
579};
580
581enum rtl_flag {
582 RTL_FLAG_TASK_ENABLED = 0,
583 RTL_FLAG_TASK_RESET_PENDING,
584 RTL_FLAG_MAX
585};
586
587struct rtl8169_stats {
588 u64 packets;
589 u64 bytes;
590 struct u64_stats_sync syncp;
591};
592
593struct rtl8169_private {
594 void __iomem *mmio_addr;
595 struct pci_dev *pci_dev;
596 struct net_device *dev;
597 struct phy_device *phydev;
598 struct napi_struct napi;
599 u32 msg_enable;
600 enum mac_version mac_version;
601 u32 cur_rx;
602 u32 cur_tx;
603 u32 dirty_tx;
604 struct rtl8169_stats rx_stats;
605 struct rtl8169_stats tx_stats;
606 struct TxDesc *TxDescArray;
607 struct RxDesc *RxDescArray;
608 dma_addr_t TxPhyAddr;
609 dma_addr_t RxPhyAddr;
610 struct page *Rx_databuff[NUM_RX_DESC];
611 struct ring_info tx_skb[NUM_TX_DESC];
612 u16 cp_cmd;
613 u32 irq_mask;
614 struct clk *clk;
615
616 struct {
617 DECLARE_BITMAP(flags, RTL_FLAG_MAX);
618 struct mutex mutex;
619 struct work_struct work;
620 } wk;
621
622 unsigned irq_enabled:1;
623 unsigned supports_gmii:1;
624 unsigned aspm_manageable:1;
625 dma_addr_t counters_phys_addr;
626 struct rtl8169_counters *counters;
627 struct rtl8169_tc_offsets tc_offset;
628 u32 saved_wolopts;
629 int eee_adv;
630
631 const char *fw_name;
632 struct rtl_fw *rtl_fw;
633
634 u32 ocp_base;
635};
636
637typedef void (*rtl_generic_fct)(struct rtl8169_private *tp);
638
639MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
640MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
641module_param_named(debug, debug.msg_enable, int, 0);
642MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
643MODULE_SOFTDEP("pre: realtek");
644MODULE_LICENSE("GPL");
645MODULE_FIRMWARE(FIRMWARE_8168D_1);
646MODULE_FIRMWARE(FIRMWARE_8168D_2);
647MODULE_FIRMWARE(FIRMWARE_8168E_1);
648MODULE_FIRMWARE(FIRMWARE_8168E_2);
649MODULE_FIRMWARE(FIRMWARE_8168E_3);
650MODULE_FIRMWARE(FIRMWARE_8105E_1);
651MODULE_FIRMWARE(FIRMWARE_8168F_1);
652MODULE_FIRMWARE(FIRMWARE_8168F_2);
653MODULE_FIRMWARE(FIRMWARE_8402_1);
654MODULE_FIRMWARE(FIRMWARE_8411_1);
655MODULE_FIRMWARE(FIRMWARE_8411_2);
656MODULE_FIRMWARE(FIRMWARE_8106E_1);
657MODULE_FIRMWARE(FIRMWARE_8106E_2);
658MODULE_FIRMWARE(FIRMWARE_8168G_2);
659MODULE_FIRMWARE(FIRMWARE_8168G_3);
660MODULE_FIRMWARE(FIRMWARE_8168H_1);
661MODULE_FIRMWARE(FIRMWARE_8168H_2);
662MODULE_FIRMWARE(FIRMWARE_8168FP_3);
663MODULE_FIRMWARE(FIRMWARE_8107E_1);
664MODULE_FIRMWARE(FIRMWARE_8107E_2);
665MODULE_FIRMWARE(FIRMWARE_8125A_3);
666
667static inline struct device *tp_to_dev(struct rtl8169_private *tp)
668{
669 return &tp->pci_dev->dev;
670}
671
672static void rtl_lock_work(struct rtl8169_private *tp)
673{
674 mutex_lock(&tp->wk.mutex);
675}
676
677static void rtl_unlock_work(struct rtl8169_private *tp)
678{
679 mutex_unlock(&tp->wk.mutex);
680}
681
682static void rtl_lock_config_regs(struct rtl8169_private *tp)
683{
684 RTL_W8(tp, Cfg9346, Cfg9346_Lock);
685}
686
687static void rtl_unlock_config_regs(struct rtl8169_private *tp)
688{
689 RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
690}
691
static void rtl_pci_commit(struct rtl8169_private *tp)
{
	/* Read an arbitrary register to commit a preceding PCI write */
	RTL_R8(tp, ChipCmd);
}
697
698static bool rtl_is_8125(struct rtl8169_private *tp)
699{
700 return tp->mac_version >= RTL_GIGA_MAC_VER_60;
701}
702
703static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
704{
705 return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
706 tp->mac_version != RTL_GIGA_MAC_VER_39 &&
707 tp->mac_version <= RTL_GIGA_MAC_VER_52;
708}
709
710static bool rtl_supports_eee(struct rtl8169_private *tp)
711{
712 return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
713 tp->mac_version != RTL_GIGA_MAC_VER_37 &&
714 tp->mac_version != RTL_GIGA_MAC_VER_39;
715}
716
717static void rtl_read_mac_from_reg(struct rtl8169_private *tp, u8 *mac, int reg)
718{
719 int i;
720
721 for (i = 0; i < ETH_ALEN; i++)
722 mac[i] = RTL_R8(tp, reg + i);
723}
724
725struct rtl_cond {
726 bool (*check)(struct rtl8169_private *);
727 const char *msg;
728};
729
730static void rtl_udelay(unsigned int d)
731{
732 udelay(d);
733}
734
735static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
736 void (*delay)(unsigned int), unsigned int d, int n,
737 bool high)
738{
739 int i;
740
741 for (i = 0; i < n; i++) {
742 if (c->check(tp) == high)
743 return true;
744 delay(d);
745 }
746 netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
747 c->msg, !high, n, d);
748 return false;
749}
750
751static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
752 const struct rtl_cond *c,
753 unsigned int d, int n)
754{
755 return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
756}
757
758static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
759 const struct rtl_cond *c,
760 unsigned int d, int n)
761{
762 return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
763}
764
765static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
766 const struct rtl_cond *c,
767 unsigned int d, int n)
768{
769 return rtl_loop_wait(tp, c, msleep, d, n, true);
770}
771
772static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
773 const struct rtl_cond *c,
774 unsigned int d, int n)
775{
776 return rtl_loop_wait(tp, c, msleep, d, n, false);
777}
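
/*
 * Usage note: rtl_loop_wait() polls cond->check(tp) up to n times with a
 * fixed delay d between attempts, so e.g.
 *
 *	rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10);
 *
 * busy-waits for the condition to become true for at most roughly
 * 10 * 25us = 250us before logging a timeout.
 */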
778
779#define DECLARE_RTL_COND(name) \
780static bool name ## _check(struct rtl8169_private *); \
781 \
782static const struct rtl_cond name = { \
783 .check = name ## _check, \
784 .msg = #name \
785}; \
786 \
787static bool name ## _check(struct rtl8169_private *tp)
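
/*
 * For reference, DECLARE_RTL_COND(rtl_ocp_gphy_cond) below expands to a
 * forward declaration of rtl_ocp_gphy_cond_check(), a const struct rtl_cond
 * named rtl_ocp_gphy_cond wired to that function and to the string
 * "rtl_ocp_gphy_cond", followed by the function definition header whose body
 * is supplied at the macro use site.
 */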
788
789static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
790{
791 if (reg & 0xffff0001) {
792 netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
793 return true;
794 }
795 return false;
796}
797
798DECLARE_RTL_COND(rtl_ocp_gphy_cond)
799{
800 return RTL_R32(tp, GPHY_OCP) & OCPAR_FLAG;
801}
802
803static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
804{
805 if (rtl_ocp_reg_failure(tp, reg))
806 return;
807
808 RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
809
810 rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
811}
812
813static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
814{
815 if (rtl_ocp_reg_failure(tp, reg))
816 return 0;
817
818 RTL_W32(tp, GPHY_OCP, reg << 15);
819
820 return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
821 (RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT;
822}
823
824static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
825{
826 if (rtl_ocp_reg_failure(tp, reg))
827 return;
828
829 RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data);
830}
831
832static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
833{
834 if (rtl_ocp_reg_failure(tp, reg))
835 return 0;
836
837 RTL_W32(tp, OCPDR, reg << 15);
838
839 return RTL_R32(tp, OCPDR);
840}
841
842static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
843 u16 set)
844{
845 u16 data = r8168_mac_ocp_read(tp, reg);
846
847 r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
848}
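
/*
 * Example use of the read-modify-write helper, as seen later in this file in
 * the WoL code (set, then clear, bit 0 of MAC OCP register 0xc0b6):
 *
 *	r8168_mac_ocp_modify(tp, 0xc0b6, 0, BIT(0));
 *	r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0);
 */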
849
850#define OCP_STD_PHY_BASE 0xa400
851
852static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
853{
854 if (reg == 0x1f) {
855 tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
856 return;
857 }
858
859 if (tp->ocp_base != OCP_STD_PHY_BASE)
860 reg -= 0x10;
861
862 r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
863}
864
865static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
866{
867 if (reg == 0x1f)
868 return tp->ocp_base == OCP_STD_PHY_BASE ? 0 : tp->ocp_base >> 4;
869
870 if (tp->ocp_base != OCP_STD_PHY_BASE)
871 reg -= 0x10;
872
873 return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
874}
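
/*
 * Sketch of the paged access scheme implemented above (values illustrative):
 * writing register 0x1f selects an OCP page, and subsequent accesses to
 * registers >= 0x10 are translated into that page, e.g.
 *
 *	r8168g_mdio_write(tp, 0x1f, 0x0a43);	(ocp_base becomes 0xa430)
 *	r8168g_mdio_read(tp, 0x10);		(accesses OCP address 0xa430)
 *
 * Writing page 0 restores OCP_STD_PHY_BASE.
 */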
875
876static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value)
877{
878 if (reg == 0x1f) {
879 tp->ocp_base = value << 4;
880 return;
881 }
882
883 r8168_mac_ocp_write(tp, tp->ocp_base + reg, value);
884}
885
886static int mac_mcu_read(struct rtl8169_private *tp, int reg)
887{
888 return r8168_mac_ocp_read(tp, tp->ocp_base + reg);
889}
890
891DECLARE_RTL_COND(rtl_phyar_cond)
892{
893 return RTL_R32(tp, PHYAR) & 0x80000000;
894}
895
static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));

	rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);

	/*
	 * According to hardware specs a 20us delay is required after write
	 * complete indication, but before sending next command.
	 */
	udelay(20);
}
907
static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
{
	int value;

	RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16);

	value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
		RTL_R32(tp, PHYAR) & 0xffff : -ETIMEDOUT;

	/*
	 * According to hardware specs a 20us delay is required after read
	 * complete indication, but before sending next command.
	 */
	udelay(20);

	return value;
}
925
926DECLARE_RTL_COND(rtl_ocpar_cond)
927{
928 return RTL_R32(tp, OCPAR) & OCPAR_FLAG;
929}
930
931static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
932{
933 RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
934 RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD);
935 RTL_W32(tp, EPHY_RXER_NUM, 0);
936
937 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
938}
939
940static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
941{
942 r8168dp_1_mdio_access(tp, reg,
943 OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
944}
945
946static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
947{
948 r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
949
950 mdelay(1);
951 RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD);
952 RTL_W32(tp, EPHY_RXER_NUM, 0);
953
954 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
955 RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT;
956}
957
958#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
959
960static void r8168dp_2_mdio_start(struct rtl8169_private *tp)
961{
962 RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
963}
964
965static void r8168dp_2_mdio_stop(struct rtl8169_private *tp)
966{
967 RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
968}
969
970static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
971{
972 r8168dp_2_mdio_start(tp);
973
974 r8169_mdio_write(tp, reg, value);
975
976 r8168dp_2_mdio_stop(tp);
977}
978
static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
{
	int value;

	/* Work around issue with chip reporting wrong PHY ID */
	if (reg == MII_PHYSID2)
		return 0xc912;

	r8168dp_2_mdio_start(tp);

	value = r8169_mdio_read(tp, reg);

	r8168dp_2_mdio_stop(tp);

	return value;
}
995
996static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
997{
998 switch (tp->mac_version) {
999 case RTL_GIGA_MAC_VER_27:
1000 r8168dp_1_mdio_write(tp, location, val);
1001 break;
1002 case RTL_GIGA_MAC_VER_28:
1003 case RTL_GIGA_MAC_VER_31:
1004 r8168dp_2_mdio_write(tp, location, val);
1005 break;
1006 case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_61:
1007 r8168g_mdio_write(tp, location, val);
1008 break;
1009 default:
1010 r8169_mdio_write(tp, location, val);
1011 break;
1012 }
1013}
1014
1015static int rtl_readphy(struct rtl8169_private *tp, int location)
1016{
1017 switch (tp->mac_version) {
1018 case RTL_GIGA_MAC_VER_27:
1019 return r8168dp_1_mdio_read(tp, location);
1020 case RTL_GIGA_MAC_VER_28:
1021 case RTL_GIGA_MAC_VER_31:
1022 return r8168dp_2_mdio_read(tp, location);
1023 case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_61:
1024 return r8168g_mdio_read(tp, location);
1025 default:
1026 return r8169_mdio_read(tp, location);
1027 }
1028}
1029
1030DECLARE_RTL_COND(rtl_ephyar_cond)
1031{
1032 return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG;
1033}
1034
1035static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
1036{
1037 RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
1038 (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1039
1040 rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
1041
1042 udelay(10);
1043}
1044
1045static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
1046{
1047 RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1048
1049 return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
1050 RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0;
1051}
1052
static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
{
	/* based on RTL8168FP_OOBMAC_BASE in vendor driver */
	if (tp->mac_version == RTL_GIGA_MAC_VER_52 && type == ERIAR_OOB)
		*cmd |= 0x7f0 << 18;
}
1059
1060DECLARE_RTL_COND(rtl_eriar_cond)
1061{
1062 return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
1063}
1064
1065static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
1066 u32 val, int type)
1067{
1068 u32 cmd = ERIAR_WRITE_CMD | type | mask | addr;
1069
1070 BUG_ON((addr & 3) || (mask == 0));
1071 RTL_W32(tp, ERIDR, val);
1072 r8168fp_adjust_ocp_cmd(tp, &cmd, type);
1073 RTL_W32(tp, ERIAR, cmd);
1074
1075 rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
1076}
1077
1078static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
1079 u32 val)
1080{
1081 _rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC);
1082}
1083
1084static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
1085{
1086 u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr;
1087
1088 r8168fp_adjust_ocp_cmd(tp, &cmd, type);
1089 RTL_W32(tp, ERIAR, cmd);
1090
1091 return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
1092 RTL_R32(tp, ERIDR) : ~0;
1093}
1094
1095static u32 rtl_eri_read(struct rtl8169_private *tp, int addr)
1096{
1097 return _rtl_eri_read(tp, addr, ERIAR_EXGMAC);
1098}
1099
1100static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
1101 u32 m)
1102{
1103 u32 val;
1104
1105 val = rtl_eri_read(tp, addr);
1106 rtl_eri_write(tp, addr, mask, (val & ~m) | p);
1107}
1108
1109static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 mask,
1110 u32 p)
1111{
1112 rtl_w0w1_eri(tp, addr, mask, p, 0);
1113}
1114
1115static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 mask,
1116 u32 m)
1117{
1118 rtl_w0w1_eri(tp, addr, mask, 0, m);
1119}
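
/*
 * Example: the ERI bit helpers above are used elsewhere in this file like
 *
 *	rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0));
 *	rtl_eri_clear_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0));
 *
 * i.e. a read-modify-write of ERI register 0xdc restricted to the byte
 * lane(s) selected by the mask.
 */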
1120
1121static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
1122{
1123 RTL_W32(tp, OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
1124 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
1125 RTL_R32(tp, OCPDR) : ~0;
1126}
1127
1128static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
1129{
1130 return _rtl_eri_read(tp, reg, ERIAR_OOB);
1131}
1132
1133static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
1134 u32 data)
1135{
1136 RTL_W32(tp, OCPDR, data);
1137 RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
1138 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
1139}
1140
1141static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
1142 u32 data)
1143{
1144 _rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT,
1145 data, ERIAR_OOB);
1146}
1147
1148static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd)
1149{
1150 rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd);
1151
1152 r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001);
1153}
1154
1155#define OOB_CMD_RESET 0x00
1156#define OOB_CMD_DRIVER_START 0x05
1157#define OOB_CMD_DRIVER_STOP 0x06
1158
1159static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
1160{
1161 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
1162}
1163
1164DECLARE_RTL_COND(rtl_dp_ocp_read_cond)
1165{
1166 u16 reg;
1167
1168 reg = rtl8168_get_ocp_reg(tp);
1169
1170 return r8168dp_ocp_read(tp, 0x0f, reg) & 0x00000800;
1171}
1172
1173DECLARE_RTL_COND(rtl_ep_ocp_read_cond)
1174{
1175 return r8168ep_ocp_read(tp, 0x0f, 0x124) & 0x00000001;
1176}
1177
1178DECLARE_RTL_COND(rtl_ocp_tx_cond)
1179{
1180 return RTL_R8(tp, IBISR0) & 0x20;
1181}
1182
1183static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
1184{
1185 RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01);
1186 rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000);
1187 RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20);
1188 RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01);
1189}
1190
1191static void rtl8168dp_driver_start(struct rtl8169_private *tp)
1192{
1193 r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
1194 rtl_msleep_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10, 10);
1195}
1196
1197static void rtl8168ep_driver_start(struct rtl8169_private *tp)
1198{
1199 r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
1200 r8168ep_ocp_write(tp, 0x01, 0x30,
1201 r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
1202 rtl_msleep_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10, 10);
1203}
1204
1205static void rtl8168_driver_start(struct rtl8169_private *tp)
1206{
1207 switch (tp->mac_version) {
1208 case RTL_GIGA_MAC_VER_27:
1209 case RTL_GIGA_MAC_VER_28:
1210 case RTL_GIGA_MAC_VER_31:
1211 rtl8168dp_driver_start(tp);
1212 break;
1213 case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
1214 rtl8168ep_driver_start(tp);
1215 break;
1216 default:
1217 BUG();
1218 break;
1219 }
1220}
1221
1222static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
1223{
1224 r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
1225 rtl_msleep_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10, 10);
1226}
1227
1228static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
1229{
1230 rtl8168ep_stop_cmac(tp);
1231 r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
1232 r8168ep_ocp_write(tp, 0x01, 0x30,
1233 r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
1234 rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10);
1235}
1236
1237static void rtl8168_driver_stop(struct rtl8169_private *tp)
1238{
1239 switch (tp->mac_version) {
1240 case RTL_GIGA_MAC_VER_27:
1241 case RTL_GIGA_MAC_VER_28:
1242 case RTL_GIGA_MAC_VER_31:
1243 rtl8168dp_driver_stop(tp);
1244 break;
1245 case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
1246 rtl8168ep_driver_stop(tp);
1247 break;
1248 default:
1249 BUG();
1250 break;
1251 }
1252}
1253
1254static bool r8168dp_check_dash(struct rtl8169_private *tp)
1255{
1256 u16 reg = rtl8168_get_ocp_reg(tp);
1257
1258 return !!(r8168dp_ocp_read(tp, 0x0f, reg) & 0x00008000);
1259}
1260
1261static bool r8168ep_check_dash(struct rtl8169_private *tp)
1262{
1263 return !!(r8168ep_ocp_read(tp, 0x0f, 0x128) & 0x00000001);
1264}
1265
1266static bool r8168_check_dash(struct rtl8169_private *tp)
1267{
1268 switch (tp->mac_version) {
1269 case RTL_GIGA_MAC_VER_27:
1270 case RTL_GIGA_MAC_VER_28:
1271 case RTL_GIGA_MAC_VER_31:
1272 return r8168dp_check_dash(tp);
1273 case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
1274 return r8168ep_check_dash(tp);
1275 default:
1276 return false;
1277 }
1278}
1279
1280static void rtl_reset_packet_filter(struct rtl8169_private *tp)
1281{
1282 rtl_eri_clear_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0));
1283 rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0));
1284}
1285
1286DECLARE_RTL_COND(rtl_efusear_cond)
1287{
1288 return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG;
1289}
1290
1291u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
1292{
1293 RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
1294
1295 return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
1296 RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
1297}
1298
1299static u32 rtl_get_events(struct rtl8169_private *tp)
1300{
1301 if (rtl_is_8125(tp))
1302 return RTL_R32(tp, IntrStatus_8125);
1303 else
1304 return RTL_R16(tp, IntrStatus);
1305}
1306
1307static void rtl_ack_events(struct rtl8169_private *tp, u32 bits)
1308{
1309 if (rtl_is_8125(tp))
1310 RTL_W32(tp, IntrStatus_8125, bits);
1311 else
1312 RTL_W16(tp, IntrStatus, bits);
1313}
1314
1315static void rtl_irq_disable(struct rtl8169_private *tp)
1316{
1317 if (rtl_is_8125(tp))
1318 RTL_W32(tp, IntrMask_8125, 0);
1319 else
1320 RTL_W16(tp, IntrMask, 0);
1321 tp->irq_enabled = 0;
1322}
1323
1324static void rtl_irq_enable(struct rtl8169_private *tp)
1325{
1326 tp->irq_enabled = 1;
1327 if (rtl_is_8125(tp))
1328 RTL_W32(tp, IntrMask_8125, tp->irq_mask);
1329 else
1330 RTL_W16(tp, IntrMask, tp->irq_mask);
1331}
1332
1333static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
1334{
1335 rtl_irq_disable(tp);
1336 rtl_ack_events(tp, 0xffffffff);
1337 rtl_pci_commit(tp);
1338}
1339
1340static void rtl_link_chg_patch(struct rtl8169_private *tp)
1341{
1342 struct phy_device *phydev = tp->phydev;
1343
1344 if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
1345 tp->mac_version == RTL_GIGA_MAC_VER_38) {
1346 if (phydev->speed == SPEED_1000) {
1347 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011);
1348 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
1349 } else if (phydev->speed == SPEED_100) {
1350 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
1351 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
1352 } else {
1353 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
1354 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f);
1355 }
1356 rtl_reset_packet_filter(tp);
1357 } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
1358 tp->mac_version == RTL_GIGA_MAC_VER_36) {
1359 if (phydev->speed == SPEED_1000) {
1360 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011);
1361 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
1362 } else {
1363 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
1364 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f);
1365 }
1366 } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
1367 if (phydev->speed == SPEED_10) {
1368 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02);
1369 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060a);
1370 } else {
1371 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
1372 }
1373 }
1374}
1375
1376#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1377
1378static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1379{
1380 struct rtl8169_private *tp = netdev_priv(dev);
1381
1382 rtl_lock_work(tp);
1383 wol->supported = WAKE_ANY;
1384 wol->wolopts = tp->saved_wolopts;
1385 rtl_unlock_work(tp);
1386}
1387
1388static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1389{
1390 static const struct {
1391 u32 opt;
1392 u16 reg;
1393 u8 mask;
1394 } cfg[] = {
1395 { WAKE_PHY, Config3, LinkUp },
1396 { WAKE_UCAST, Config5, UWF },
1397 { WAKE_BCAST, Config5, BWF },
1398 { WAKE_MCAST, Config5, MWF },
1399 { WAKE_ANY, Config5, LanWake },
1400 { WAKE_MAGIC, Config3, MagicPacket }
1401 };
1402 unsigned int i, tmp = ARRAY_SIZE(cfg);
1403 u8 options;
1404
1405 rtl_unlock_config_regs(tp);
1406
1407 if (rtl_is_8168evl_up(tp)) {
1408 tmp--;
1409 if (wolopts & WAKE_MAGIC)
1410 rtl_eri_set_bits(tp, 0x0dc, ERIAR_MASK_0100,
1411 MagicPacket_v2);
1412 else
1413 rtl_eri_clear_bits(tp, 0x0dc, ERIAR_MASK_0100,
1414 MagicPacket_v2);
1415 } else if (rtl_is_8125(tp)) {
1416 tmp--;
1417 if (wolopts & WAKE_MAGIC)
1418 r8168_mac_ocp_modify(tp, 0xc0b6, 0, BIT(0));
1419 else
1420 r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0);
1421 }
1422
1423 for (i = 0; i < tmp; i++) {
1424 options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask;
1425 if (wolopts & cfg[i].opt)
1426 options |= cfg[i].mask;
1427 RTL_W8(tp, cfg[i].reg, options);
1428 }
1429
1430 switch (tp->mac_version) {
1431 case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
1432 options = RTL_R8(tp, Config1) & ~PMEnable;
1433 if (wolopts)
1434 options |= PMEnable;
1435 RTL_W8(tp, Config1, options);
1436 break;
1437 case RTL_GIGA_MAC_VER_34:
1438 case RTL_GIGA_MAC_VER_37:
1439 case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_52:
1440 options = RTL_R8(tp, Config2) & ~PME_SIGNAL;
1441 if (wolopts)
1442 options |= PME_SIGNAL;
1443 RTL_W8(tp, Config2, options);
1444 break;
1445 default:
1446 break;
1447 }
1448
1449 rtl_lock_config_regs(tp);
1450
1451 device_set_wakeup_enable(tp_to_dev(tp), wolopts);
1452 tp->dev->wol_enabled = wolopts ? 1 : 0;
1453}
1454
1455static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1456{
1457 struct rtl8169_private *tp = netdev_priv(dev);
1458 struct device *d = tp_to_dev(tp);
1459
1460 if (wol->wolopts & ~WAKE_ANY)
1461 return -EINVAL;
1462
1463 pm_runtime_get_noresume(d);
1464
1465 rtl_lock_work(tp);
1466
1467 tp->saved_wolopts = wol->wolopts;
1468
1469 if (pm_runtime_active(d))
1470 __rtl8169_set_wol(tp, tp->saved_wolopts);
1471
1472 rtl_unlock_work(tp);
1473
1474 pm_runtime_put_noidle(d);
1475
1476 return 0;
1477}
1478
1479static void rtl8169_get_drvinfo(struct net_device *dev,
1480 struct ethtool_drvinfo *info)
1481{
1482 struct rtl8169_private *tp = netdev_priv(dev);
1483 struct rtl_fw *rtl_fw = tp->rtl_fw;
1484
1485 strlcpy(info->driver, MODULENAME, sizeof(info->driver));
1486 strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
1487 BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
1488 if (rtl_fw)
1489 strlcpy(info->fw_version, rtl_fw->version,
1490 sizeof(info->fw_version));
1491}
1492
1493static int rtl8169_get_regs_len(struct net_device *dev)
1494{
1495 return R8169_REGS_SIZE;
1496}
1497
1498static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1499 netdev_features_t features)
1500{
1501 struct rtl8169_private *tp = netdev_priv(dev);
1502
1503 if (dev->mtu > TD_MSS_MAX)
1504 features &= ~NETIF_F_ALL_TSO;
1505
1506 if (dev->mtu > ETH_DATA_LEN &&
1507 tp->mac_version > RTL_GIGA_MAC_VER_06)
1508 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO);
1509
1510 return features;
1511}
1512
1513static int rtl8169_set_features(struct net_device *dev,
1514 netdev_features_t features)
1515{
1516 struct rtl8169_private *tp = netdev_priv(dev);
1517 u32 rx_config;
1518
1519 rtl_lock_work(tp);
1520
1521 rx_config = RTL_R32(tp, RxConfig);
1522 if (features & NETIF_F_RXALL)
1523 rx_config |= (AcceptErr | AcceptRunt);
1524 else
1525 rx_config &= ~(AcceptErr | AcceptRunt);
1526
1527 if (rtl_is_8125(tp)) {
1528 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1529 rx_config |= RX_VLAN_8125;
1530 else
1531 rx_config &= ~RX_VLAN_8125;
1532 }
1533
1534 RTL_W32(tp, RxConfig, rx_config);
1535
1536 if (features & NETIF_F_RXCSUM)
1537 tp->cp_cmd |= RxChkSum;
1538 else
1539 tp->cp_cmd &= ~RxChkSum;
1540
1541 if (!rtl_is_8125(tp)) {
1542 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1543 tp->cp_cmd |= RxVlan;
1544 else
1545 tp->cp_cmd &= ~RxVlan;
1546 }
1547
1548 RTL_W16(tp, CPlusCmd, tp->cp_cmd);
1549 rtl_pci_commit(tp);
1550
1551 rtl_unlock_work(tp);
1552
1553 return 0;
1554}
1555
1556static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
1557{
1558 return (skb_vlan_tag_present(skb)) ?
1559 TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
1560}
1561
1562static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1563{
1564 u32 opts2 = le32_to_cpu(desc->opts2);
1565
1566 if (opts2 & RxVlanTag)
1567 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
1568}
1569
1570static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1571 void *p)
1572{
1573 struct rtl8169_private *tp = netdev_priv(dev);
1574 u32 __iomem *data = tp->mmio_addr;
1575 u32 *dw = p;
1576 int i;
1577
1578 rtl_lock_work(tp);
1579 for (i = 0; i < R8169_REGS_SIZE; i += 4)
1580 memcpy_fromio(dw++, data++, 4);
1581 rtl_unlock_work(tp);
1582}
1583
1584static u32 rtl8169_get_msglevel(struct net_device *dev)
1585{
1586 struct rtl8169_private *tp = netdev_priv(dev);
1587
1588 return tp->msg_enable;
1589}
1590
1591static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1592{
1593 struct rtl8169_private *tp = netdev_priv(dev);
1594
1595 tp->msg_enable = value;
1596}
1597
1598static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
1599 "tx_packets",
1600 "rx_packets",
1601 "tx_errors",
1602 "rx_errors",
1603 "rx_missed",
1604 "align_errors",
1605 "tx_single_collisions",
1606 "tx_multi_collisions",
1607 "unicast",
1608 "broadcast",
1609 "multicast",
1610 "tx_aborted",
1611 "tx_underrun",
1612};
1613
1614static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1615{
1616 switch (sset) {
1617 case ETH_SS_STATS:
1618 return ARRAY_SIZE(rtl8169_gstrings);
1619 default:
1620 return -EOPNOTSUPP;
1621 }
1622}
1623
1624DECLARE_RTL_COND(rtl_counters_cond)
1625{
1626 return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump);
1627}
1628
1629static bool rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
1630{
1631 dma_addr_t paddr = tp->counters_phys_addr;
1632 u32 cmd;
1633
1634 RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32);
1635 rtl_pci_commit(tp);
1636 cmd = (u64)paddr & DMA_BIT_MASK(32);
1637 RTL_W32(tp, CounterAddrLow, cmd);
1638 RTL_W32(tp, CounterAddrLow, cmd | counter_cmd);
1639
1640 return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
1641}
1642
static bool rtl8169_reset_counters(struct rtl8169_private *tp)
{
	/*
	 * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the
	 * tally counters.
	 */
	if (tp->mac_version < RTL_GIGA_MAC_VER_19)
		return true;

	return rtl8169_do_counters(tp, CounterReset);
}
1654
static bool rtl8169_update_counters(struct rtl8169_private *tp)
{
	u8 val = RTL_R8(tp, ChipCmd);

	/*
	 * Some chips are unable to dump tally counters when the receiver
	 * is disabled. If 0xff chip may be in a PCI power-save state.
	 */
	if (!(val & CmdRxEnb) || val == 0xff)
		return true;

	return rtl8169_do_counters(tp, CounterDump);
}
1668
static bool rtl8169_init_counter_offsets(struct rtl8169_private *tp)
{
	struct rtl8169_counters *counters = tp->counters;
	bool ret = false;

	/*
	 * On chip versions that support resetting the tally counters the
	 * counters are cleared here, so the per-interface statistics start
	 * from zero.  On versions without reset support the hardware keeps
	 * accumulating across ifup/ifdown, so the current values are
	 * snapshotted into tp->tc_offset and subtracted as a baseline when
	 * statistics are reported.  The snapshot is taken only once
	 * (tc_offset.inited).
	 */
	if (tp->tc_offset.inited)
		return true;

	/* If both, reset and update fail, propagate to caller. */
	if (rtl8169_reset_counters(tp))
		ret = true;

	if (rtl8169_update_counters(tp))
		ret = true;

	tp->tc_offset.tx_errors = counters->tx_errors;
	tp->tc_offset.tx_multi_collision = counters->tx_multi_collision;
	tp->tc_offset.tx_aborted = counters->tx_aborted;
	tp->tc_offset.rx_missed = counters->rx_missed;
	tp->tc_offset.inited = true;

	return ret;
}
1707
1708static void rtl8169_get_ethtool_stats(struct net_device *dev,
1709 struct ethtool_stats *stats, u64 *data)
1710{
1711 struct rtl8169_private *tp = netdev_priv(dev);
1712 struct device *d = tp_to_dev(tp);
1713 struct rtl8169_counters *counters = tp->counters;
1714
1715 ASSERT_RTNL();
1716
1717 pm_runtime_get_noresume(d);
1718
1719 if (pm_runtime_active(d))
1720 rtl8169_update_counters(tp);
1721
1722 pm_runtime_put_noidle(d);
1723
1724 data[0] = le64_to_cpu(counters->tx_packets);
1725 data[1] = le64_to_cpu(counters->rx_packets);
1726 data[2] = le64_to_cpu(counters->tx_errors);
1727 data[3] = le32_to_cpu(counters->rx_errors);
1728 data[4] = le16_to_cpu(counters->rx_missed);
1729 data[5] = le16_to_cpu(counters->align_errors);
1730 data[6] = le32_to_cpu(counters->tx_one_collision);
1731 data[7] = le32_to_cpu(counters->tx_multi_collision);
1732 data[8] = le64_to_cpu(counters->rx_unicast);
1733 data[9] = le64_to_cpu(counters->rx_broadcast);
1734 data[10] = le32_to_cpu(counters->rx_multicast);
1735 data[11] = le16_to_cpu(counters->tx_aborted);
1736 data[12] = le16_to_cpu(counters->tx_underun);
1737}
1738
1739static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1740{
1741 switch(stringset) {
1742 case ETH_SS_STATS:
1743 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1744 break;
1745 }
1746}
1747
/*
 * Interrupt coalescing
 *
 * The chip supports interrupt mitigation through the IntrMitigate (0xe2)
 * register, which packs four 4-bit fields: a timer value and a max-frames
 * value for each of Rx and Tx.  The duration of one timer unit depends on
 * the link speed and on CPlusCmd[1:0]; the base (CPlusCmd[1:0] == 00)
 * nanoseconds-per-unit values are listed in the tables below and grow by
 * *8, *8*2 and *8*2*2 for the other three CPlusCmd settings.
 */

struct rtl_coalesce_scale {
	/* Rx / Tx */
	u32 nsecs[2];
};

/* rx/tx scale factors, one entry per CPlusCmd[1:0] value */
struct rtl_coalesce_info {
	u32 speed;
	struct rtl_coalesce_scale scalev[4];
};

/* produce (r,t) pairs with each being in series of *1, *8, *8*2, *8*2*2 */
#define rxtx_x1822(r, t) { \
	{{(r), (t)}}, \
	{{(r)*8, (t)*8}}, \
	{{(r)*8*2, (t)*8*2}}, \
	{{(r)*8*2*2, (t)*8*2*2}}, \
}

static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = {
	/* speed: base timer unit in ns (rx, tx) at CPlusCmd[1:0] == 00 */
	{ SPEED_10, rxtx_x1822(40960, 40960) },
	{ SPEED_100, rxtx_x1822( 2560, 2560) },
	{ SPEED_1000, rxtx_x1822( 320, 320) },
	{ 0 },
};

static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = {
	/* speed: base timer unit in ns (rx, tx) at CPlusCmd[1:0] == 00 */
	{ SPEED_10, rxtx_x1822(40960, 40960) },
	{ SPEED_100, rxtx_x1822( 2560, 2560) },
	{ SPEED_1000, rxtx_x1822( 5000, 5000) },
	{ 0 },
};
#undef rxtx_x1822
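
/*
 * Worked example (derived from the tables above): on an 8168 at 1000 Mbps
 * the base timer unit is 5000 ns; with the coarsest CPlusCmd[1:0] scale
 * (*8*2*2 = *32) one IntrMitigate timer tick is 160 us, so the 4-bit timer
 * field covers up to about 15 * 160 us = 2.4 ms of delay.
 */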
1811
1812
1813static const struct rtl_coalesce_info *rtl_coalesce_info(struct net_device *dev)
1814{
1815 struct rtl8169_private *tp = netdev_priv(dev);
1816 const struct rtl_coalesce_info *ci;
1817
1818 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
1819 ci = rtl_coalesce_info_8169;
1820 else
1821 ci = rtl_coalesce_info_8168_8136;
1822
1823 for (; ci->speed; ci++) {
1824 if (tp->phydev->speed == ci->speed)
1825 return ci;
1826 }
1827
1828 return ERR_PTR(-ELNRNG);
1829}
1830
1831static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
1832{
1833 struct rtl8169_private *tp = netdev_priv(dev);
1834 const struct rtl_coalesce_info *ci;
1835 const struct rtl_coalesce_scale *scale;
1836 struct {
1837 u32 *max_frames;
1838 u32 *usecs;
1839 } coal_settings [] = {
1840 { &ec->rx_max_coalesced_frames, &ec->rx_coalesce_usecs },
1841 { &ec->tx_max_coalesced_frames, &ec->tx_coalesce_usecs }
1842 }, *p = coal_settings;
1843 int i;
1844 u16 w;
1845
1846 if (rtl_is_8125(tp))
1847 return -EOPNOTSUPP;
1848
	memset(ec, 0, sizeof(*ec));

	/* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */
	ci = rtl_coalesce_info(dev);
	if (IS_ERR(ci))
		return PTR_ERR(ci);

	scale = &ci->scalev[tp->cp_cmd & INTT_MASK];

	/* read IntrMitigate and adjust according to scale */
	for (w = RTL_R16(tp, IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) {
		*p->max_frames = (w & RTL_COALESCE_MASK) << 2;
		w >>= RTL_COALESCE_SHIFT;
		*p->usecs = w & RTL_COALESCE_MASK;
	}

	for (i = 0; i < 2; i++) {
		p = coal_settings + i;
		*p->usecs = (*p->usecs * scale->nsecs[i]) / 1000;

		/*
		 * ethtool_coalesce says it is illegal to set both usecs and
		 * max_frames to 0, so report at least one frame.
		 */
		if (!*p->usecs && !*p->max_frames)
			*p->max_frames = 1;
	}

	return 0;
}
1879
1880
1881static const struct rtl_coalesce_scale *rtl_coalesce_choose_scale(
1882 struct net_device *dev, u32 nsec, u16 *cp01)
1883{
1884 const struct rtl_coalesce_info *ci;
1885 u16 i;
1886
1887 ci = rtl_coalesce_info(dev);
1888 if (IS_ERR(ci))
1889 return ERR_CAST(ci);
1890
1891 for (i = 0; i < 4; i++) {
1892 u32 rxtx_maxscale = max(ci->scalev[i].nsecs[0],
1893 ci->scalev[i].nsecs[1]);
1894 if (nsec <= rxtx_maxscale * RTL_COALESCE_T_MAX) {
1895 *cp01 = i;
1896 return &ci->scalev[i];
1897 }
1898 }
1899
1900 return ERR_PTR(-EINVAL);
1901}
1902
1903static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
1904{
1905 struct rtl8169_private *tp = netdev_priv(dev);
1906 const struct rtl_coalesce_scale *scale;
1907 struct {
1908 u32 frames;
1909 u32 usecs;
1910 } coal_settings [] = {
1911 { ec->rx_max_coalesced_frames, ec->rx_coalesce_usecs },
1912 { ec->tx_max_coalesced_frames, ec->tx_coalesce_usecs }
1913 }, *p = coal_settings;
1914 u16 w = 0, cp01;
1915 int i;
1916
1917 if (rtl_is_8125(tp))
1918 return -EOPNOTSUPP;
1919
1920 scale = rtl_coalesce_choose_scale(dev,
1921 max(p[0].usecs, p[1].usecs) * 1000, &cp01);
1922 if (IS_ERR(scale))
1923 return PTR_ERR(scale);
1924
	for (i = 0; i < 2; i++, p++) {
		u32 units;

		/*
		 * Accept the max_frames=1 that rtl_get_coalesce reports, and
		 * not only when usecs=0: ethtool hands back the whole
		 * ethtool_coalesce struct, so after e.g.
		 * `ethtool -C eth0 rx-usecs 100` the rx_frames=1 we reported
		 * ourselves would otherwise fail the multiple-of-4 check.
		 */
		if (p->frames == 1)
			p->frames = 0;

		units = p->usecs * 1000 / scale->nsecs[i];
		if (p->frames > RTL_COALESCE_FRAME_MAX || p->frames % 4)
			return -EINVAL;

		w <<= RTL_COALESCE_SHIFT;
		w |= units;
		w <<= RTL_COALESCE_SHIFT;
		w |= p->frames >> 2;
	}
1953
1954 rtl_lock_work(tp);
1955
1956 RTL_W16(tp, IntrMitigate, swab16(w));
1957
1958 tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01;
1959 RTL_W16(tp, CPlusCmd, tp->cp_cmd);
1960 rtl_pci_commit(tp);
1961
1962 rtl_unlock_work(tp);
1963
1964 return 0;
1965}
1966
1967static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data)
1968{
1969 struct rtl8169_private *tp = netdev_priv(dev);
1970 struct device *d = tp_to_dev(tp);
1971 int ret;
1972
1973 if (!rtl_supports_eee(tp))
1974 return -EOPNOTSUPP;
1975
1976 pm_runtime_get_noresume(d);
1977
1978 if (!pm_runtime_active(d)) {
1979 ret = -EOPNOTSUPP;
1980 } else {
1981 ret = phy_ethtool_get_eee(tp->phydev, data);
1982 }
1983
1984 pm_runtime_put_noidle(d);
1985
1986 return ret;
1987}
1988
1989static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
1990{
1991 struct rtl8169_private *tp = netdev_priv(dev);
1992 struct device *d = tp_to_dev(tp);
1993 int ret;
1994
1995 if (!rtl_supports_eee(tp))
1996 return -EOPNOTSUPP;
1997
1998 pm_runtime_get_noresume(d);
1999
2000 if (!pm_runtime_active(d)) {
2001 ret = -EOPNOTSUPP;
2002 goto out;
2003 }
2004
2005 if (dev->phydev->autoneg == AUTONEG_DISABLE ||
2006 dev->phydev->duplex != DUPLEX_FULL) {
2007 ret = -EPROTONOSUPPORT;
2008 goto out;
2009 }
2010
2011 ret = phy_ethtool_set_eee(tp->phydev, data);
2012
2013 if (!ret)
2014 tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN,
2015 MDIO_AN_EEE_ADV);
2016out:
2017 pm_runtime_put_noidle(d);
2018 return ret;
2019}
2020
2021static const struct ethtool_ops rtl8169_ethtool_ops = {
2022 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
2023 ETHTOOL_COALESCE_MAX_FRAMES,
2024 .get_drvinfo = rtl8169_get_drvinfo,
2025 .get_regs_len = rtl8169_get_regs_len,
2026 .get_link = ethtool_op_get_link,
2027 .get_coalesce = rtl_get_coalesce,
2028 .set_coalesce = rtl_set_coalesce,
2029 .get_msglevel = rtl8169_get_msglevel,
2030 .set_msglevel = rtl8169_set_msglevel,
2031 .get_regs = rtl8169_get_regs,
2032 .get_wol = rtl8169_get_wol,
2033 .set_wol = rtl8169_set_wol,
2034 .get_strings = rtl8169_get_strings,
2035 .get_sset_count = rtl8169_get_sset_count,
2036 .get_ethtool_stats = rtl8169_get_ethtool_stats,
2037 .get_ts_info = ethtool_op_get_ts_info,
2038 .nway_reset = phy_ethtool_nway_reset,
2039 .get_eee = rtl8169_get_eee,
2040 .set_eee = rtl8169_set_eee,
2041 .get_link_ksettings = phy_ethtool_get_link_ksettings,
2042 .set_link_ksettings = phy_ethtool_set_link_ksettings,
2043};
2044
2045static void rtl_enable_eee(struct rtl8169_private *tp)
2046{
2047 struct phy_device *phydev = tp->phydev;
2048 int adv;
2049
2050
2051 if (tp->eee_adv >= 0)
2052 adv = tp->eee_adv;
2053 else
2054 adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
2055
2056 if (adv >= 0)
2057 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
2058}
2059
2060static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{
	/*
	 * Chips are matched against this table with "xid & mask == val",
	 * scanning from the top, so more specific entries must come before
	 * the more generic ones of the same family.
	 */
	static const struct rtl_mac_info {
		u16 mask;
		u16 val;
		enum mac_version ver;
	} mac_info[] = {
		/* 8125 */
		{ 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 },
		{ 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 },
2081
2082
2083 { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52 },
2084
2085
2086 { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 },
2087 { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 },
2088 { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 },
2089
2090
2091 { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 },
2092 { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 },
2093
2094
2095 { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 },
2096 { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 },
2097 { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 },
2098 { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 },
2099
2100
2101 { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 },
2102 { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 },
2103 { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 },
2104
2105
2106 { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34 },
2107 { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32 },
2108 { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33 },
2109
2110
2111 { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25 },
2112 { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 },
2113
2114
2115 { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 },
2116 { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 },
2117 { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 },
2118
2119
2120 { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23 },
2121 { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18 },
2122 { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24 },
2123 { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19 },
2124 { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20 },
2125 { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21 },
2126 { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 },
2127
2128
2129 { 0x7cf, 0x380, RTL_GIGA_MAC_VER_12 },
2130 { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 },
2131 { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 },
2132
2133
2134 { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 },
2135 { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37 },
2136 { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29 },
2137 { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30 },
2138 { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08 },
2139 { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 },
2140 { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 },
2141 { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 },
2142 { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 },
2143
2144 { 0x7cf, 0x240, RTL_GIGA_MAC_VER_13 },
2145 { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 },
2146 { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 },
2147 { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },
2148 { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 },
2149 { 0x7c8, 0x340, RTL_GIGA_MAC_VER_16 },
2150
2151 { 0xfc8, 0x388, RTL_GIGA_MAC_VER_15 },
2152 { 0xfc8, 0x308, RTL_GIGA_MAC_VER_14 },
2153
2154
2155 { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 },
2156 { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05 },
2157 { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04 },
2158 { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03 },
2159 { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02 },
2160
2161
2162 { 0x000, 0x000, RTL_GIGA_MAC_NONE }
2163 };
2164 const struct rtl_mac_info *p = mac_info;
2165 enum mac_version ver;
2166
2167 while ((xid & p->mask) != p->val)
2168 p++;
2169 ver = p->ver;
2170
2171 if (ver != RTL_GIGA_MAC_NONE && !gmii) {
2172 if (ver == RTL_GIGA_MAC_VER_42)
2173 ver = RTL_GIGA_MAC_VER_43;
2174 else if (ver == RTL_GIGA_MAC_VER_45)
2175 ver = RTL_GIGA_MAC_VER_47;
2176 else if (ver == RTL_GIGA_MAC_VER_46)
2177 ver = RTL_GIGA_MAC_VER_48;
2178 }
2179
2180 return ver;
2181}
2182
2183static void rtl_release_firmware(struct rtl8169_private *tp)
2184{
2185 if (tp->rtl_fw) {
2186 rtl_fw_release_firmware(tp->rtl_fw);
2187 kfree(tp->rtl_fw);
2188 tp->rtl_fw = NULL;
2189 }
2190}
2191
2192void r8169_apply_firmware(struct rtl8169_private *tp)
2193{
2194
2195 if (tp->rtl_fw) {
2196 rtl_fw_write_firmware(tp, tp->rtl_fw);
		/* At least one firmware doesn't reset tp->ocp_base. */
2198 tp->ocp_base = OCP_STD_PHY_BASE;
2199 }
2200}
2201
2202static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
2203{
	/* Adjust EEE LED frequency */
2205 if (tp->mac_version != RTL_GIGA_MAC_VER_38)
2206 RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
2207
2208 rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_1111, 0x0003);
2209}
2210
2211static void rtl8125_config_eee_mac(struct rtl8169_private *tp)
2212{
2213 r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
2214 r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1));
2215}
2216
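/* RTL8168e-vl additionally needs the MAC address mirrored into the
 * ERI/ExGMAC registers, see rtl_rar_set().
 */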
2217static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
2218{
2219 const u16 w[] = {
2220 addr[0] | (addr[1] << 8),
2221 addr[2] | (addr[3] << 8),
2222 addr[4] | (addr[5] << 8)
2223 };
2224
2225 rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, w[0] | (w[1] << 16));
2226 rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, w[2]);
2227 rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, w[0] << 16);
2228 rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, w[1] | (w[2] << 16));
2229}
2230
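/* Read the ADC bias current offset the chip reports via MAC OCP; the
 * RTL8168h rev.2 PHY configuration re-applies this value.
 */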
2231u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp)
2232{
2233 u16 data1, data2, ioffset;
2234
2235 r8168_mac_ocp_write(tp, 0xdd02, 0x807d);
2236 data1 = r8168_mac_ocp_read(tp, 0xdd02);
2237 data2 = r8168_mac_ocp_read(tp, 0xdd00);
2238
2239 ioffset = (data2 >> 1) & 0x7ff8;
2240 ioffset |= data2 & 0x0007;
2241 if (data1 & BIT(7))
2242 ioffset |= BIT(15);
2243
2244 return ioffset;
2245}
2246
2247static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
2248{
2249 set_bit(flag, tp->wk.flags);
2250 schedule_work(&tp->wk.work);
2251}
2252
2253static void rtl8169_init_phy(struct rtl8169_private *tp)
2254{
2255 r8169_hw_phy_config(tp, tp->phydev, tp->mac_version);
2256
2257 if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
2258 pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
2259 pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
		/* set undocumented MAC Reg C+CR Offset 0x82h */
2261 RTL_W8(tp, 0x82, 0x01);
2262 }
2263
2264 if (tp->mac_version == RTL_GIGA_MAC_VER_05 &&
2265 tp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_GIGABYTE &&
2266 tp->pci_dev->subsystem_device == 0xe000)
2267 phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b);
2268
	/* We may have called phy_speed_down before */
2270 phy_speed_up(tp->phydev);
2271
2272 if (rtl_supports_eee(tp))
2273 rtl_enable_eee(tp);
2274
2275 genphy_soft_reset(tp->phydev);
2276}
2277
2278static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
2279{
2280 rtl_lock_work(tp);
2281
2282 rtl_unlock_config_regs(tp);
2283
2284 RTL_W32(tp, MAC4, addr[4] | addr[5] << 8);
2285 rtl_pci_commit(tp);
2286
2287 RTL_W32(tp, MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
2288 rtl_pci_commit(tp);
2289
2290 if (tp->mac_version == RTL_GIGA_MAC_VER_34)
2291 rtl_rar_exgmac_set(tp, addr);
2292
2293 rtl_lock_config_regs(tp);
2294
2295 rtl_unlock_work(tp);
2296}
2297
2298static int rtl_set_mac_address(struct net_device *dev, void *p)
2299{
2300 struct rtl8169_private *tp = netdev_priv(dev);
2301 struct device *d = tp_to_dev(tp);
2302 int ret;
2303
2304 ret = eth_mac_addr(dev, p);
2305 if (ret)
2306 return ret;
2307
2308 pm_runtime_get_noresume(d);
2309
2310 if (pm_runtime_active(d))
2311 rtl_rar_set(tp, dev->dev_addr);
2312
2313 pm_runtime_put_noidle(d);
2314
2315 return 0;
2316}
2317
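/* When the system may wake up via WoL, keep the receiver configured to
 * accept broadcast, multicast and directed frames while suspended.
 */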
2318static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
2319{
2320 switch (tp->mac_version) {
2321 case RTL_GIGA_MAC_VER_25:
2322 case RTL_GIGA_MAC_VER_26:
2323 case RTL_GIGA_MAC_VER_29:
2324 case RTL_GIGA_MAC_VER_30:
2325 case RTL_GIGA_MAC_VER_32:
2326 case RTL_GIGA_MAC_VER_33:
2327 case RTL_GIGA_MAC_VER_34:
2328 case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_61:
2329 RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
2330 AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
2331 break;
2332 default:
2333 break;
2334 }
2335}
2336
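/* Prepare for suspend: with DASH or Wake-on-LAN active the PHY stays up
 * (at reduced speed), otherwise PLL/PHY power is dropped in a
 * chip-specific way.
 */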
2337static void rtl_pll_power_down(struct rtl8169_private *tp)
2338{
2339 if (r8168_check_dash(tp))
2340 return;
2341
2342 if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
2343 tp->mac_version == RTL_GIGA_MAC_VER_33)
2344 rtl_ephy_write(tp, 0x19, 0xff64);
2345
2346 if (device_may_wakeup(tp_to_dev(tp))) {
2347 phy_speed_down(tp->phydev, false);
2348 rtl_wol_suspend_quirk(tp);
2349 return;
2350 }
2351
2352 switch (tp->mac_version) {
2353 case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
2354 case RTL_GIGA_MAC_VER_37:
2355 case RTL_GIGA_MAC_VER_39:
2356 case RTL_GIGA_MAC_VER_43:
2357 case RTL_GIGA_MAC_VER_44:
2358 case RTL_GIGA_MAC_VER_45:
2359 case RTL_GIGA_MAC_VER_46:
2360 case RTL_GIGA_MAC_VER_47:
2361 case RTL_GIGA_MAC_VER_48:
2362 case RTL_GIGA_MAC_VER_50:
2363 case RTL_GIGA_MAC_VER_51:
2364 case RTL_GIGA_MAC_VER_52:
2365 case RTL_GIGA_MAC_VER_60:
2366 case RTL_GIGA_MAC_VER_61:
2367 RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
2368 break;
2369 case RTL_GIGA_MAC_VER_40:
2370 case RTL_GIGA_MAC_VER_41:
2371 case RTL_GIGA_MAC_VER_49:
2372 rtl_eri_clear_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000);
2373 RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
2374 break;
2375 default:
2376 break;
2377 }
2378}
2379
2380static void rtl_pll_power_up(struct rtl8169_private *tp)
2381{
2382 switch (tp->mac_version) {
2383 case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
2384 case RTL_GIGA_MAC_VER_37:
2385 case RTL_GIGA_MAC_VER_39:
2386 case RTL_GIGA_MAC_VER_43:
2387 RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80);
2388 break;
2389 case RTL_GIGA_MAC_VER_44:
2390 case RTL_GIGA_MAC_VER_45:
2391 case RTL_GIGA_MAC_VER_46:
2392 case RTL_GIGA_MAC_VER_47:
2393 case RTL_GIGA_MAC_VER_48:
2394 case RTL_GIGA_MAC_VER_50:
2395 case RTL_GIGA_MAC_VER_51:
2396 case RTL_GIGA_MAC_VER_52:
2397 case RTL_GIGA_MAC_VER_60:
2398 case RTL_GIGA_MAC_VER_61:
2399 RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
2400 break;
2401 case RTL_GIGA_MAC_VER_40:
2402 case RTL_GIGA_MAC_VER_41:
2403 case RTL_GIGA_MAC_VER_49:
2404 RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
2405 rtl_eri_set_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000);
2406 break;
2407 default:
2408 break;
2409 }
2410
2411 phy_resume(tp->phydev);
	/* give MAC/PHY some time to resume */
2413 msleep(20);
2414}
2415
2416static void rtl_init_rxcfg(struct rtl8169_private *tp)
2417{
2418 switch (tp->mac_version) {
2419 case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
2420 case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
2421 RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
2422 break;
2423 case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
2424 case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
2425 case RTL_GIGA_MAC_VER_38:
2426 RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
2427 break;
2428 case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
2429 RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
2430 break;
2431 case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
2432 RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_VLAN_8125 |
2433 RX_DMA_BURST);
2434 break;
2435 default:
2436 RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
2437 break;
2438 }
2439}
2440
2441static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
2442{
2443 tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
2444}
2445
2446static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
2447{
2448 RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
2449 RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1);
2450}
2451
2452static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
2453{
2454 RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
2455 RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1);
2456}
2457
2458static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
2459{
2460 RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
2461}
2462
2463static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
2464{
2465 RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
2466}
2467
2468static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
2469{
2470 RTL_W8(tp, MaxTxPacketSize, 0x3f);
2471 RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
2472 RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01);
2473}
2474
2475static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
2476{
2477 RTL_W8(tp, MaxTxPacketSize, 0x0c);
2478 RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
2479 RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
2480}
2481
2482static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
2483{
2484 RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0));
2485}
2486
2487static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
2488{
2489 RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
2490}
2491
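/* Jumbo frames need chip-specific enable bits and, on the affected PCIe
 * chips, a reduced maximum read request size; the 4K default is restored
 * once jumbo mode is left.
 */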
2492static void rtl_jumbo_config(struct rtl8169_private *tp)
2493{
2494 bool jumbo = tp->dev->mtu > ETH_DATA_LEN;
2495
2496 rtl_unlock_config_regs(tp);
2497 switch (tp->mac_version) {
2498 case RTL_GIGA_MAC_VER_12:
2499 case RTL_GIGA_MAC_VER_17:
2500 if (jumbo) {
2501 pcie_set_readrq(tp->pci_dev, 512);
2502 r8168b_1_hw_jumbo_enable(tp);
2503 } else {
2504 r8168b_1_hw_jumbo_disable(tp);
2505 }
2506 break;
2507 case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
2508 if (jumbo) {
2509 pcie_set_readrq(tp->pci_dev, 512);
2510 r8168c_hw_jumbo_enable(tp);
2511 } else {
2512 r8168c_hw_jumbo_disable(tp);
2513 }
2514 break;
2515 case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
2516 if (jumbo)
2517 r8168dp_hw_jumbo_enable(tp);
2518 else
2519 r8168dp_hw_jumbo_disable(tp);
2520 break;
2521 case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
2522 if (jumbo) {
2523 pcie_set_readrq(tp->pci_dev, 512);
2524 r8168e_hw_jumbo_enable(tp);
2525 } else {
2526 r8168e_hw_jumbo_disable(tp);
2527 }
2528 break;
2529 default:
2530 break;
2531 }
2532 rtl_lock_config_regs(tp);
2533
2534 if (!jumbo && pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
2535 pcie_set_readrq(tp->pci_dev, 4096);
2536}
2537
2538DECLARE_RTL_COND(rtl_chipcmd_cond)
2539{
2540 return RTL_R8(tp, ChipCmd) & CmdReset;
2541}
2542
2543static void rtl_hw_reset(struct rtl8169_private *tp)
2544{
2545 RTL_W8(tp, ChipCmd, CmdReset);
2546
2547 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
2548}
2549
2550static void rtl_request_firmware(struct rtl8169_private *tp)
2551{
2552 struct rtl_fw *rtl_fw;
2553
	/* firmware loaded already or no firmware available */
2555 if (tp->rtl_fw || !tp->fw_name)
2556 return;
2557
2558 rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
2559 if (!rtl_fw) {
2560 netif_warn(tp, ifup, tp->dev, "Unable to load firmware, out of memory\n");
2561 return;
2562 }
2563
2564 rtl_fw->phy_write = rtl_writephy;
2565 rtl_fw->phy_read = rtl_readphy;
2566 rtl_fw->mac_mcu_write = mac_mcu_write;
2567 rtl_fw->mac_mcu_read = mac_mcu_read;
2568 rtl_fw->fw_name = tp->fw_name;
2569 rtl_fw->dev = tp_to_dev(tp);
2570
2571 if (rtl_fw_request_firmware(rtl_fw))
2572 kfree(rtl_fw);
2573 else
2574 tp->rtl_fw = rtl_fw;
2575}
2576
2577static void rtl_rx_close(struct rtl8169_private *tp)
2578{
2579 RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
2580}
2581
2582DECLARE_RTL_COND(rtl_npq_cond)
2583{
2584 return RTL_R8(tp, TxPoll) & NPQ;
2585}
2586
2587DECLARE_RTL_COND(rtl_txcfg_empty_cond)
2588{
2589 return RTL_R32(tp, TxConfig) & TXCFG_EMPTY;
2590}
2591
2592static void rtl8169_hw_reset(struct rtl8169_private *tp)
2593{
	/* Disable interrupts */
2595 rtl8169_irq_mask_and_ack(tp);
2596
2597 rtl_rx_close(tp);
2598
2599 switch (tp->mac_version) {
2600 case RTL_GIGA_MAC_VER_27:
2601 case RTL_GIGA_MAC_VER_28:
2602 case RTL_GIGA_MAC_VER_31:
2603 rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
2604 break;
2605 case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
2606 case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
2607 RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
2608 rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
2609 break;
2610 default:
2611 RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
2612 udelay(100);
2613 break;
2614 }
2615
2616 rtl_hw_reset(tp);
2617}
2618
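/* Program Tx DMA burst size and interframe gap; RTL8168e-vl and later
 * additionally use the automatic Tx FIFO fill mode.
 */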
2619static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
2620{
2621 u32 val = TX_DMA_BURST << TxDMAShift |
2622 InterFrameGap << TxInterFrameGapShift;
2623
2624 if (rtl_is_8168evl_up(tp))
2625 val |= TXCFG_AUTO_FIFO;
2626
2627 RTL_W32(tp, TxConfig, val);
2628}
2629
2630static void rtl_set_rx_max_size(struct rtl8169_private *tp)
2631{
2632
2633 RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1);
2634}
2635
2636static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp)
2637{
	/* Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
2643 RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
2644 RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
2645 RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
2646 RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
2647}
2648
2649static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version)
2650{
2651 u32 val;
2652
2653 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
2654 val = 0x000fff00;
2655 else if (tp->mac_version == RTL_GIGA_MAC_VER_06)
2656 val = 0x00ffff00;
2657 else
2658 return;
2659
2660 if (RTL_R8(tp, Config2) & PCI_Clock_66MHz)
2661 val |= 0xff;
2662
2663 RTL_W32(tp, 0x7c, val);
2664}
2665
2666static void rtl_set_rx_mode(struct net_device *dev)
2667{
2668 u32 rx_mode = AcceptBroadcast | AcceptMyPhys | AcceptMulticast;
	/* Multicast hash filter */
2670 u32 mc_filter[2] = { 0xffffffff, 0xffffffff };
2671 struct rtl8169_private *tp = netdev_priv(dev);
2672 u32 tmp;
2673
2674 if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
2676 netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
2677 rx_mode |= AcceptAllPhys;
2678 } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
2679 dev->flags & IFF_ALLMULTI ||
2680 tp->mac_version == RTL_GIGA_MAC_VER_35) {
		/* accept all multicasts */
2682 } else if (netdev_mc_empty(dev)) {
2683 rx_mode &= ~AcceptMulticast;
2684 } else {
2685 struct netdev_hw_addr *ha;
2686
2687 mc_filter[1] = mc_filter[0] = 0;
2688 netdev_for_each_mc_addr(ha, dev) {
2689 u32 bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2690 mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31);
2691 }
2692
2693 if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
2694 tmp = mc_filter[0];
2695 mc_filter[0] = swab32(mc_filter[1]);
2696 mc_filter[1] = swab32(tmp);
2697 }
2698 }
2699
2700 if (dev->features & NETIF_F_RXALL)
2701 rx_mode |= (AcceptErr | AcceptRunt);
2702
2703 RTL_W32(tp, MAR0 + 4, mc_filter[1]);
2704 RTL_W32(tp, MAR0 + 0, mc_filter[0]);
2705
2706 tmp = RTL_R32(tp, RxConfig);
2707 RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_MASK) | rx_mode);
2708}
2709
2710DECLARE_RTL_COND(rtl_csiar_cond)
2711{
2712 return RTL_R32(tp, CSIAR) & CSIAR_FLAG;
2713}
2714
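/* CSI is an indirect way to reach the chip's extended PCIe config space
 * through the CSIAR/CSIDR register pair.
 */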
2715static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
2716{
2717 u32 func = PCI_FUNC(tp->pci_dev->devfn);
2718
2719 RTL_W32(tp, CSIDR, value);
2720 RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
2721 CSIAR_BYTE_ENABLE | func << 16);
2722
2723 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
2724}
2725
2726static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
2727{
2728 u32 func = PCI_FUNC(tp->pci_dev->devfn);
2729
2730 RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 |
2731 CSIAR_BYTE_ENABLE);
2732
2733 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
2734 RTL_R32(tp, CSIDR) : ~0;
2735}
2736
2737static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val)
2738{
2739 struct pci_dev *pdev = tp->pci_dev;
2740 u32 csi;
2741
	/* According to Realtek the value at config space address 0x070f
	 * controls the L0s/L1 entrance latency. We try standard ECAM access
	 * first and if it fails fall back to CSI.
	 */
2746 if (pdev->cfg_size > 0x070f &&
2747 pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL)
2748 return;
2749
2750 netdev_notice_once(tp->dev,
2751 "No native access to PCI extended config space, falling back to CSI\n");
2752 csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
2753 rtl_csi_write(tp, 0x070c, csi | val << 24);
2754}
2755
2756static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp)
2757{
2758 rtl_csi_access_enable(tp, 0x27);
2759}
2760
2761struct ephy_info {
2762 unsigned int offset;
2763 u16 mask;
2764 u16 bits;
2765};
2766
2767static void __rtl_ephy_init(struct rtl8169_private *tp,
2768 const struct ephy_info *e, int len)
2769{
2770 u16 w;
2771
2772 while (len-- > 0) {
2773 w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
2774 rtl_ephy_write(tp, e->offset, w);
2775 e++;
2776 }
2777}
2778
2779#define rtl_ephy_init(tp, a) __rtl_ephy_init(tp, a, ARRAY_SIZE(a))
2780
2781static void rtl_disable_clock_request(struct rtl8169_private *tp)
2782{
2783 pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL,
2784 PCI_EXP_LNKCTL_CLKREQ_EN);
2785}
2786
2787static void rtl_enable_clock_request(struct rtl8169_private *tp)
2788{
2789 pcie_capability_set_word(tp->pci_dev, PCI_EXP_LNKCTL,
2790 PCI_EXP_LNKCTL_CLKREQ_EN);
2791}
2792
2793static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp)
2794{
	/* Prevent the chip from signalling readiness for PCIe L2/L3 states */
2796 RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Rdy_to_L23);
2797}
2798
2799static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
2800{
	/* Don't enable ASPM in the chip if the OS can't control ASPM */
2802 if (enable && tp->aspm_manageable) {
2803 RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
2804 RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
2805 } else {
2806 RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
2807 RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
2808 }
2809
2810 udelay(10);
2811}
2812
2813static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat,
2814 u16 tx_stat, u16 rx_dyn, u16 tx_dyn)
2815{
	/* Usage of dynamic vs. static FIFO is controlled by bit
	 * TXCFG_AUTO_FIFO. Exact meaning of the FIFO values isn't known.
	 */
2819 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, (rx_stat << 16) | rx_dyn);
2820 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, (tx_stat << 16) | tx_dyn);
2821}
2822
2823static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp,
2824 u8 low, u8 high)
2825{
2826
2827 rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, low);
2828 rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high);
2829}
2830
2831static void rtl_hw_start_8168b(struct rtl8169_private *tp)
2832{
2833 RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
2834}
2835
2836static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
2837{
2838 RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down);
2839
2840 RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
2841
2842 rtl_disable_clock_request(tp);
2843}
2844
2845static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
2846{
2847 static const struct ephy_info e_info_8168cp[] = {
2848 { 0x01, 0, 0x0001 },
2849 { 0x02, 0x0800, 0x1000 },
2850 { 0x03, 0, 0x0042 },
2851 { 0x06, 0x0080, 0x0000 },
2852 { 0x07, 0, 0x2000 }
2853 };
2854
2855 rtl_set_def_aspm_entry_latency(tp);
2856
2857 rtl_ephy_init(tp, e_info_8168cp);
2858
2859 __rtl_hw_start_8168cp(tp);
2860}
2861
2862static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
2863{
2864 rtl_set_def_aspm_entry_latency(tp);
2865
2866 RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
2867}
2868
2869static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
2870{
2871 rtl_set_def_aspm_entry_latency(tp);
2872
2873 RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
2874
2875
2876 RTL_W8(tp, DBG_REG, 0x20);
2877}
2878
2879static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
2880{
2881 static const struct ephy_info e_info_8168c_1[] = {
2882 { 0x02, 0x0800, 0x1000 },
2883 { 0x03, 0, 0x0002 },
2884 { 0x06, 0x0080, 0x0000 }
2885 };
2886
2887 rtl_set_def_aspm_entry_latency(tp);
2888
2889 RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
2890
2891 rtl_ephy_init(tp, e_info_8168c_1);
2892
2893 __rtl_hw_start_8168cp(tp);
2894}
2895
2896static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
2897{
2898 static const struct ephy_info e_info_8168c_2[] = {
2899 { 0x01, 0, 0x0001 },
2900 { 0x03, 0x0400, 0x0020 }
2901 };
2902
2903 rtl_set_def_aspm_entry_latency(tp);
2904
2905 rtl_ephy_init(tp, e_info_8168c_2);
2906
2907 __rtl_hw_start_8168cp(tp);
2908}
2909
2910static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
2911{
2912 rtl_hw_start_8168c_2(tp);
2913}
2914
2915static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
2916{
2917 rtl_set_def_aspm_entry_latency(tp);
2918
2919 __rtl_hw_start_8168cp(tp);
2920}
2921
2922static void rtl_hw_start_8168d(struct rtl8169_private *tp)
2923{
2924 rtl_set_def_aspm_entry_latency(tp);
2925
2926 rtl_disable_clock_request(tp);
2927}
2928
2929static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
2930{
2931 static const struct ephy_info e_info_8168d_4[] = {
2932 { 0x0b, 0x0000, 0x0048 },
2933 { 0x19, 0x0020, 0x0050 },
2934 { 0x0c, 0x0100, 0x0020 },
2935 { 0x10, 0x0004, 0x0000 },
2936 };
2937
2938 rtl_set_def_aspm_entry_latency(tp);
2939
2940 rtl_ephy_init(tp, e_info_8168d_4);
2941
2942 rtl_enable_clock_request(tp);
2943}
2944
2945static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
2946{
2947 static const struct ephy_info e_info_8168e_1[] = {
2948 { 0x00, 0x0200, 0x0100 },
2949 { 0x00, 0x0000, 0x0004 },
2950 { 0x06, 0x0002, 0x0001 },
2951 { 0x06, 0x0000, 0x0030 },
2952 { 0x07, 0x0000, 0x2000 },
2953 { 0x00, 0x0000, 0x0020 },
2954 { 0x03, 0x5800, 0x2000 },
2955 { 0x03, 0x0000, 0x0001 },
2956 { 0x01, 0x0800, 0x1000 },
2957 { 0x07, 0x0000, 0x4000 },
2958 { 0x1e, 0x0000, 0x2000 },
2959 { 0x19, 0xffff, 0xfe6c },
2960 { 0x0a, 0x0000, 0x0040 }
2961 };
2962
2963 rtl_set_def_aspm_entry_latency(tp);
2964
2965 rtl_ephy_init(tp, e_info_8168e_1);
2966
2967 rtl_disable_clock_request(tp);
2968
	/* Reset tx FIFO pointer */
2970 RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST);
2971 RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST);
2972
2973 RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
2974}
2975
2976static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
2977{
2978 static const struct ephy_info e_info_8168e_2[] = {
2979 { 0x09, 0x0000, 0x0080 },
2980 { 0x19, 0x0000, 0x0224 },
2981 { 0x00, 0x0000, 0x0004 },
2982 { 0x0c, 0x3df0, 0x0200 },
2983 };
2984
2985 rtl_set_def_aspm_entry_latency(tp);
2986
2987 rtl_ephy_init(tp, e_info_8168e_2);
2988
2989 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
2990 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
2991 rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
2992 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
2993 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060);
2994 rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4));
2995 rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00);
2996
2997 rtl_disable_clock_request(tp);
2998
2999 RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
3000
3001 rtl8168_config_eee_mac(tp);
3002
3003 RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
3004 RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
3005 RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
3006
3007 rtl_hw_aspm_clkreq_enable(tp, true);
3008}
3009
3010static void rtl_hw_start_8168f(struct rtl8169_private *tp)
3011{
3012 rtl_set_def_aspm_entry_latency(tp);
3013
3014 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3015 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
3016 rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
3017 rtl_reset_packet_filter(tp);
3018 rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4));
3019 rtl_eri_set_bits(tp, 0x1d0, ERIAR_MASK_0001, BIT(4));
3020 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
3021 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060);
3022
3023 rtl_disable_clock_request(tp);
3024
3025 RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
3026 RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
3027 RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
3028 RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
3029
3030 rtl8168_config_eee_mac(tp);
3031}
3032
3033static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
3034{
3035 static const struct ephy_info e_info_8168f_1[] = {
3036 { 0x06, 0x00c0, 0x0020 },
3037 { 0x08, 0x0001, 0x0002 },
3038 { 0x09, 0x0000, 0x0080 },
3039 { 0x19, 0x0000, 0x0224 },
3040 { 0x00, 0x0000, 0x0004 },
3041 { 0x0c, 0x3df0, 0x0200 },
3042 };
3043
3044 rtl_hw_start_8168f(tp);
3045
3046 rtl_ephy_init(tp, e_info_8168f_1);
3047
3048 rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00);
3049}
3050
3051static void rtl_hw_start_8411(struct rtl8169_private *tp)
3052{
3053 static const struct ephy_info e_info_8168f_1[] = {
3054 { 0x06, 0x00c0, 0x0020 },
3055 { 0x0f, 0xffff, 0x5200 },
3056 { 0x19, 0x0000, 0x0224 },
3057 { 0x00, 0x0000, 0x0004 },
3058 { 0x0c, 0x3df0, 0x0200 },
3059 };
3060
3061 rtl_hw_start_8168f(tp);
3062 rtl_pcie_state_l2l3_disable(tp);
3063
3064 rtl_ephy_init(tp, e_info_8168f_1);
3065
3066 rtl_eri_set_bits(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00);
3067}
3068
3069static void rtl_hw_start_8168g(struct rtl8169_private *tp)
3070{
3071 rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
3072 rtl8168g_set_pause_thresholds(tp, 0x38, 0x48);
3073
3074 rtl_set_def_aspm_entry_latency(tp);
3075
3076 rtl_reset_packet_filter(tp);
3077 rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f);
3078
3079 RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
3080
3081 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3082 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
3083
3084 rtl8168_config_eee_mac(tp);
3085
3086 rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06);
3087 rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));
3088
3089 rtl_pcie_state_l2l3_disable(tp);
3090}
3091
3092static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
3093{
3094 static const struct ephy_info e_info_8168g_1[] = {
3095 { 0x00, 0x0008, 0x0000 },
3096 { 0x0c, 0x3ff0, 0x0820 },
3097 { 0x1e, 0x0000, 0x0001 },
3098 { 0x19, 0x8000, 0x0000 }
3099 };
3100
3101 rtl_hw_start_8168g(tp);
3102
3103
3104 rtl_hw_aspm_clkreq_enable(tp, false);
3105 rtl_ephy_init(tp, e_info_8168g_1);
3106 rtl_hw_aspm_clkreq_enable(tp, true);
3107}
3108
3109static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
3110{
3111 static const struct ephy_info e_info_8168g_2[] = {
3112 { 0x00, 0x0008, 0x0000 },
3113 { 0x0c, 0x3ff0, 0x0820 },
3114 { 0x19, 0xffff, 0x7c00 },
3115 { 0x1e, 0xffff, 0x20eb },
3116 { 0x0d, 0xffff, 0x1666 },
3117 { 0x00, 0xffff, 0x10a3 },
3118 { 0x06, 0xffff, 0xf050 },
3119 { 0x04, 0x0000, 0x0010 },
3120 { 0x1d, 0x4000, 0x0000 },
3121 };
3122
3123 rtl_hw_start_8168g(tp);
3124
3125
3126 rtl_hw_aspm_clkreq_enable(tp, false);
3127 rtl_ephy_init(tp, e_info_8168g_2);
3128}
3129
3130static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
3131{
3132 static const struct ephy_info e_info_8411_2[] = {
3133 { 0x00, 0x0008, 0x0000 },
3134 { 0x0c, 0x37d0, 0x0820 },
3135 { 0x1e, 0x0000, 0x0001 },
3136 { 0x19, 0x8021, 0x0000 },
3137 { 0x1e, 0x0000, 0x2000 },
3138 { 0x0d, 0x0100, 0x0200 },
3139 { 0x00, 0x0000, 0x0080 },
3140 { 0x06, 0x0000, 0x0010 },
3141 { 0x04, 0x0000, 0x0010 },
3142 { 0x1d, 0x0000, 0x4000 },
3143 };
3144
3145 rtl_hw_start_8168g(tp);
3146
3147
3148 rtl_hw_aspm_clkreq_enable(tp, false);
3149 rtl_ephy_init(tp, e_info_8411_2);
3150
	/* The following Realtek-provided magic fixes an issue with the RX
	 * unit getting confused after the PHY having been powered-down.
	 */
3154 r8168_mac_ocp_write(tp, 0xFC28, 0x0000);
3155 r8168_mac_ocp_write(tp, 0xFC2A, 0x0000);
3156 r8168_mac_ocp_write(tp, 0xFC2C, 0x0000);
3157 r8168_mac_ocp_write(tp, 0xFC2E, 0x0000);
3158 r8168_mac_ocp_write(tp, 0xFC30, 0x0000);
3159 r8168_mac_ocp_write(tp, 0xFC32, 0x0000);
3160 r8168_mac_ocp_write(tp, 0xFC34, 0x0000);
3161 r8168_mac_ocp_write(tp, 0xFC36, 0x0000);
3162 mdelay(3);
3163 r8168_mac_ocp_write(tp, 0xFC26, 0x0000);
3164
3165 r8168_mac_ocp_write(tp, 0xF800, 0xE008);
3166 r8168_mac_ocp_write(tp, 0xF802, 0xE00A);
3167 r8168_mac_ocp_write(tp, 0xF804, 0xE00C);
3168 r8168_mac_ocp_write(tp, 0xF806, 0xE00E);
3169 r8168_mac_ocp_write(tp, 0xF808, 0xE027);
3170 r8168_mac_ocp_write(tp, 0xF80A, 0xE04F);
3171 r8168_mac_ocp_write(tp, 0xF80C, 0xE05E);
3172 r8168_mac_ocp_write(tp, 0xF80E, 0xE065);
3173 r8168_mac_ocp_write(tp, 0xF810, 0xC602);
3174 r8168_mac_ocp_write(tp, 0xF812, 0xBE00);
3175 r8168_mac_ocp_write(tp, 0xF814, 0x0000);
3176 r8168_mac_ocp_write(tp, 0xF816, 0xC502);
3177 r8168_mac_ocp_write(tp, 0xF818, 0xBD00);
3178 r8168_mac_ocp_write(tp, 0xF81A, 0x074C);
3179 r8168_mac_ocp_write(tp, 0xF81C, 0xC302);
3180 r8168_mac_ocp_write(tp, 0xF81E, 0xBB00);
3181 r8168_mac_ocp_write(tp, 0xF820, 0x080A);
3182 r8168_mac_ocp_write(tp, 0xF822, 0x6420);
3183 r8168_mac_ocp_write(tp, 0xF824, 0x48C2);
3184 r8168_mac_ocp_write(tp, 0xF826, 0x8C20);
3185 r8168_mac_ocp_write(tp, 0xF828, 0xC516);
3186 r8168_mac_ocp_write(tp, 0xF82A, 0x64A4);
3187 r8168_mac_ocp_write(tp, 0xF82C, 0x49C0);
3188 r8168_mac_ocp_write(tp, 0xF82E, 0xF009);
3189 r8168_mac_ocp_write(tp, 0xF830, 0x74A2);
3190 r8168_mac_ocp_write(tp, 0xF832, 0x8CA5);
3191 r8168_mac_ocp_write(tp, 0xF834, 0x74A0);
3192 r8168_mac_ocp_write(tp, 0xF836, 0xC50E);
3193 r8168_mac_ocp_write(tp, 0xF838, 0x9CA2);
3194 r8168_mac_ocp_write(tp, 0xF83A, 0x1C11);
3195 r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0);
3196 r8168_mac_ocp_write(tp, 0xF83E, 0xE006);
3197 r8168_mac_ocp_write(tp, 0xF840, 0x74F8);
3198 r8168_mac_ocp_write(tp, 0xF842, 0x48C4);
3199 r8168_mac_ocp_write(tp, 0xF844, 0x8CF8);
3200 r8168_mac_ocp_write(tp, 0xF846, 0xC404);
3201 r8168_mac_ocp_write(tp, 0xF848, 0xBC00);
3202 r8168_mac_ocp_write(tp, 0xF84A, 0xC403);
3203 r8168_mac_ocp_write(tp, 0xF84C, 0xBC00);
3204 r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2);
3205 r8168_mac_ocp_write(tp, 0xF850, 0x0C0A);
3206 r8168_mac_ocp_write(tp, 0xF852, 0xE434);
3207 r8168_mac_ocp_write(tp, 0xF854, 0xD3C0);
3208 r8168_mac_ocp_write(tp, 0xF856, 0x49D9);
3209 r8168_mac_ocp_write(tp, 0xF858, 0xF01F);
3210 r8168_mac_ocp_write(tp, 0xF85A, 0xC526);
3211 r8168_mac_ocp_write(tp, 0xF85C, 0x64A5);
3212 r8168_mac_ocp_write(tp, 0xF85E, 0x1400);
3213 r8168_mac_ocp_write(tp, 0xF860, 0xF007);
3214 r8168_mac_ocp_write(tp, 0xF862, 0x0C01);
3215 r8168_mac_ocp_write(tp, 0xF864, 0x8CA5);
3216 r8168_mac_ocp_write(tp, 0xF866, 0x1C15);
3217 r8168_mac_ocp_write(tp, 0xF868, 0xC51B);
3218 r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0);
3219 r8168_mac_ocp_write(tp, 0xF86C, 0xE013);
3220 r8168_mac_ocp_write(tp, 0xF86E, 0xC519);
3221 r8168_mac_ocp_write(tp, 0xF870, 0x74A0);
3222 r8168_mac_ocp_write(tp, 0xF872, 0x48C4);
3223 r8168_mac_ocp_write(tp, 0xF874, 0x8CA0);
3224 r8168_mac_ocp_write(tp, 0xF876, 0xC516);
3225 r8168_mac_ocp_write(tp, 0xF878, 0x74A4);
3226 r8168_mac_ocp_write(tp, 0xF87A, 0x48C8);
3227 r8168_mac_ocp_write(tp, 0xF87C, 0x48CA);
3228 r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4);
3229 r8168_mac_ocp_write(tp, 0xF880, 0xC512);
3230 r8168_mac_ocp_write(tp, 0xF882, 0x1B00);
3231 r8168_mac_ocp_write(tp, 0xF884, 0x9BA0);
3232 r8168_mac_ocp_write(tp, 0xF886, 0x1B1C);
3233 r8168_mac_ocp_write(tp, 0xF888, 0x483F);
3234 r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2);
3235 r8168_mac_ocp_write(tp, 0xF88C, 0x1B04);
3236 r8168_mac_ocp_write(tp, 0xF88E, 0xC508);
3237 r8168_mac_ocp_write(tp, 0xF890, 0x9BA0);
3238 r8168_mac_ocp_write(tp, 0xF892, 0xC505);
3239 r8168_mac_ocp_write(tp, 0xF894, 0xBD00);
3240 r8168_mac_ocp_write(tp, 0xF896, 0xC502);
3241 r8168_mac_ocp_write(tp, 0xF898, 0xBD00);
3242 r8168_mac_ocp_write(tp, 0xF89A, 0x0300);
3243 r8168_mac_ocp_write(tp, 0xF89C, 0x051E);
3244 r8168_mac_ocp_write(tp, 0xF89E, 0xE434);
3245 r8168_mac_ocp_write(tp, 0xF8A0, 0xE018);
3246 r8168_mac_ocp_write(tp, 0xF8A2, 0xE092);
3247 r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20);
3248 r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0);
3249 r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F);
3250 r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4);
3251 r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3);
3252 r8168_mac_ocp_write(tp, 0xF8AE, 0xF007);
3253 r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0);
3254 r8168_mac_ocp_write(tp, 0xF8B2, 0xF103);
3255 r8168_mac_ocp_write(tp, 0xF8B4, 0xC607);
3256 r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00);
3257 r8168_mac_ocp_write(tp, 0xF8B8, 0xC606);
3258 r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00);
3259 r8168_mac_ocp_write(tp, 0xF8BC, 0xC602);
3260 r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00);
3261 r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C);
3262 r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28);
3263 r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C);
3264 r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00);
3265 r8168_mac_ocp_write(tp, 0xF8C8, 0xC707);
3266 r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00);
3267 r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2);
3268 r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1);
3269 r8168_mac_ocp_write(tp, 0xF8D0, 0xC502);
3270 r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00);
3271 r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA);
3272 r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0);
3273 r8168_mac_ocp_write(tp, 0xF8D8, 0xC502);
3274 r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00);
3275 r8168_mac_ocp_write(tp, 0xF8DC, 0x0132);
3276
3277 r8168_mac_ocp_write(tp, 0xFC26, 0x8000);
3278
3279 r8168_mac_ocp_write(tp, 0xFC2A, 0x0743);
3280 r8168_mac_ocp_write(tp, 0xFC2C, 0x0801);
3281 r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9);
3282 r8168_mac_ocp_write(tp, 0xFC30, 0x02FD);
3283 r8168_mac_ocp_write(tp, 0xFC32, 0x0C25);
3284 r8168_mac_ocp_write(tp, 0xFC34, 0x00A9);
3285 r8168_mac_ocp_write(tp, 0xFC36, 0x012D);
3286
3287 rtl_hw_aspm_clkreq_enable(tp, true);
3288}
3289
3290static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
3291{
3292 static const struct ephy_info e_info_8168h_1[] = {
3293 { 0x1e, 0x0800, 0x0001 },
3294 { 0x1d, 0x0000, 0x0800 },
3295 { 0x05, 0xffff, 0x2089 },
3296 { 0x06, 0xffff, 0x5881 },
3297 { 0x04, 0xffff, 0x854a },
3298 { 0x01, 0xffff, 0x068b }
3299 };
3300 int rg_saw_cnt;
3301
3302
3303 rtl_hw_aspm_clkreq_enable(tp, false);
3304 rtl_ephy_init(tp, e_info_8168h_1);
3305
3306 rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
3307 rtl8168g_set_pause_thresholds(tp, 0x38, 0x48);
3308
3309 rtl_set_def_aspm_entry_latency(tp);
3310
3311 rtl_reset_packet_filter(tp);
3312
3313 rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_1111, BIT(4));
3314
3315 rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f00);
3316
3317 rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
3318
3319 RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
3320
3321 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3322 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
3323
3324 rtl8168_config_eee_mac(tp);
3325
3326 RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
3327 RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
3328
3329 RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
3330
3331 rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));
3332
3333 rtl_pcie_state_l2l3_disable(tp);
3334
3335 rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff;
3336 if (rg_saw_cnt > 0) {
3337 u16 sw_cnt_1ms_ini;
3338
3339 sw_cnt_1ms_ini = 16000000/rg_saw_cnt;
3340 sw_cnt_1ms_ini &= 0x0fff;
3341 r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
3342 }
3343
3344 r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
3345 r8168_mac_ocp_modify(tp, 0xe052, 0x6000, 0x8008);
3346 r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f);
3347 r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
3348
3349 r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
3350 r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
3351 r8168_mac_ocp_write(tp, 0xc094, 0x0000);
3352 r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
3353
3354 rtl_hw_aspm_clkreq_enable(tp, true);
3355}
3356
3357static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
3358{
3359 rtl8168ep_stop_cmac(tp);
3360
3361 rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
3362 rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f);
3363
3364 rtl_set_def_aspm_entry_latency(tp);
3365
3366 rtl_reset_packet_filter(tp);
3367
3368 rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f80);
3369
3370 rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
3371
3372 RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
3373
3374 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3375 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
3376
3377 rtl8168_config_eee_mac(tp);
3378
3379 rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06);
3380
3381 RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
3382
3383 rtl_pcie_state_l2l3_disable(tp);
3384}
3385
3386static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
3387{
3388 static const struct ephy_info e_info_8168ep_1[] = {
3389 { 0x00, 0xffff, 0x10ab },
3390 { 0x06, 0xffff, 0xf030 },
3391 { 0x08, 0xffff, 0x2006 },
3392 { 0x0d, 0xffff, 0x1666 },
3393 { 0x0c, 0x3ff0, 0x0000 }
3394 };
3395
3396
3397 rtl_hw_aspm_clkreq_enable(tp, false);
3398 rtl_ephy_init(tp, e_info_8168ep_1);
3399
3400 rtl_hw_start_8168ep(tp);
3401
3402 rtl_hw_aspm_clkreq_enable(tp, true);
3403}
3404
3405static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
3406{
3407 static const struct ephy_info e_info_8168ep_2[] = {
3408 { 0x00, 0xffff, 0x10a3 },
3409 { 0x19, 0xffff, 0xfc00 },
3410 { 0x1e, 0xffff, 0x20ea }
3411 };
3412
3413
3414 rtl_hw_aspm_clkreq_enable(tp, false);
3415 rtl_ephy_init(tp, e_info_8168ep_2);
3416
3417 rtl_hw_start_8168ep(tp);
3418
3419 RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
3420 RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
3421
3422 rtl_hw_aspm_clkreq_enable(tp, true);
3423}
3424
3425static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
3426{
3427 static const struct ephy_info e_info_8168ep_3[] = {
3428 { 0x00, 0x0000, 0x0080 },
3429 { 0x0d, 0x0100, 0x0200 },
3430 { 0x19, 0x8021, 0x0000 },
3431 { 0x1e, 0x0000, 0x2000 },
3432 };
3433
3434
3435 rtl_hw_aspm_clkreq_enable(tp, false);
3436 rtl_ephy_init(tp, e_info_8168ep_3);
3437
3438 rtl_hw_start_8168ep(tp);
3439
3440 RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
3441 RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
3442
3443 r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x0271);
3444 r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000);
3445 r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080);
3446
3447 rtl_hw_aspm_clkreq_enable(tp, true);
3448}
3449
3450static void rtl_hw_start_8117(struct rtl8169_private *tp)
3451{
3452 static const struct ephy_info e_info_8117[] = {
3453 { 0x19, 0x0040, 0x1100 },
3454 { 0x59, 0x0040, 0x1100 },
3455 };
3456 int rg_saw_cnt;
3457
3458 rtl8168ep_stop_cmac(tp);
3459
3460
3461 rtl_hw_aspm_clkreq_enable(tp, false);
3462 rtl_ephy_init(tp, e_info_8117);
3463
3464 rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
3465 rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f);
3466
3467 rtl_set_def_aspm_entry_latency(tp);
3468
3469 rtl_reset_packet_filter(tp);
3470
3471 rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f90);
3472
3473 rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
3474
3475 RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
3476
3477 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3478 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
3479
3480 rtl8168_config_eee_mac(tp);
3481
3482 RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
3483 RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
3484
3485 RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
3486
3487 rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));
3488
3489 rtl_pcie_state_l2l3_disable(tp);
3490
3491 rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff;
3492 if (rg_saw_cnt > 0) {
3493 u16 sw_cnt_1ms_ini;
3494
3495 sw_cnt_1ms_ini = (16000000 / rg_saw_cnt) & 0x0fff;
3496 r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
3497 }
3498
3499 r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
3500 r8168_mac_ocp_write(tp, 0xea80, 0x0003);
3501 r8168_mac_ocp_modify(tp, 0xe052, 0x0000, 0x0009);
3502 r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
3503
3504 r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
3505 r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
3506 r8168_mac_ocp_write(tp, 0xc094, 0x0000);
3507 r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
3508
3509
3510 r8169_apply_firmware(tp);
3511
3512 rtl_hw_aspm_clkreq_enable(tp, true);
3513}
3514
3515static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
3516{
3517 static const struct ephy_info e_info_8102e_1[] = {
3518 { 0x01, 0, 0x6e65 },
3519 { 0x02, 0, 0x091f },
3520 { 0x03, 0, 0xc2f9 },
3521 { 0x06, 0, 0xafb5 },
3522 { 0x07, 0, 0x0e00 },
3523 { 0x19, 0, 0xec80 },
3524 { 0x01, 0, 0x2e65 },
3525 { 0x01, 0, 0x6e65 }
3526 };
3527 u8 cfg1;
3528
3529 rtl_set_def_aspm_entry_latency(tp);
3530
3531 RTL_W8(tp, DBG_REG, FIX_NAK_1);
3532
3533 RTL_W8(tp, Config1,
3534 LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
3535 RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
3536
3537 cfg1 = RTL_R8(tp, Config1);
3538 if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
3539 RTL_W8(tp, Config1, cfg1 & ~LEDS0);
3540
3541 rtl_ephy_init(tp, e_info_8102e_1);
3542}
3543
3544static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
3545{
3546 rtl_set_def_aspm_entry_latency(tp);
3547
3548 RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable);
3549 RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
3550}
3551
3552static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
3553{
3554 rtl_hw_start_8102e_2(tp);
3555
3556 rtl_ephy_write(tp, 0x03, 0xc2f9);
3557}
3558
3559static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
3560{
3561 static const struct ephy_info e_info_8105e_1[] = {
3562 { 0x07, 0, 0x4000 },
3563 { 0x19, 0, 0x0200 },
3564 { 0x19, 0, 0x0020 },
3565 { 0x1e, 0, 0x2000 },
3566 { 0x03, 0, 0x0001 },
3567 { 0x19, 0, 0x0100 },
3568 { 0x19, 0, 0x0004 },
3569 { 0x0a, 0, 0x0020 }
3570 };
3571
	/* Force LAN exit from ASPM if Rx/Tx are not idle */
3573 RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
3574
	/* Disable Early Tally Counter */
3576 RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) & ~0x010000);
3577
3578 RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
3579 RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
3580
3581 rtl_ephy_init(tp, e_info_8105e_1);
3582
3583 rtl_pcie_state_l2l3_disable(tp);
3584}
3585
3586static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
3587{
3588 rtl_hw_start_8105e_1(tp);
3589 rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
3590}
3591
3592static void rtl_hw_start_8402(struct rtl8169_private *tp)
3593{
3594 static const struct ephy_info e_info_8402[] = {
3595 { 0x19, 0xffff, 0xff64 },
3596 { 0x1e, 0, 0x4000 }
3597 };
3598
3599 rtl_set_def_aspm_entry_latency(tp);
3600
	/* Force LAN exit from ASPM if Rx/Tx are not idle */
3602 RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
3603
3604 RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
3605
3606 rtl_ephy_init(tp, e_info_8402);
3607
3608 rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06);
3609 rtl_reset_packet_filter(tp);
3610 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3611 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
3612 rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00);
3613
	/* disable EEE */
3615 rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
3616
3617 rtl_pcie_state_l2l3_disable(tp);
3618}
3619
3620static void rtl_hw_start_8106(struct rtl8169_private *tp)
3621{
3622 rtl_hw_aspm_clkreq_enable(tp, false);
3623
3624
3625 RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
3626
3627 RTL_W32(tp, MISC, (RTL_R32(tp, MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
3628 RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
3629 RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
3630
3631 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
3632
3633
3634 rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
3635
3636 rtl_pcie_state_l2l3_disable(tp);
3637 rtl_hw_aspm_clkreq_enable(tp, true);
3638}
3639
3640DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond)
3641{
3642 return r8168_mac_ocp_read(tp, 0xe00e) & BIT(13);
3643}
3644
3645static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
3646{
3647 rtl_pcie_state_l2l3_disable(tp);
3648
3649 RTL_W16(tp, 0x382, 0x221b);
3650 RTL_W8(tp, 0x4500, 0);
3651 RTL_W16(tp, 0x4800, 0);
3652
3653
3654 r8168_mac_ocp_modify(tp, 0xd40a, 0x0010, 0x0000);
3655
3656 RTL_W8(tp, Config1, RTL_R8(tp, Config1) & ~0x10);
3657
3658 r8168_mac_ocp_write(tp, 0xc140, 0xffff);
3659 r8168_mac_ocp_write(tp, 0xc142, 0xffff);
3660
3661 r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x03a9);
3662 r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000);
3663 r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080);
3664
	/* disable new tx descriptor format */
3666 r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000);
3667
3668 r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
3669 r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0020);
3670 r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c);
3671 r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033);
3672 r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040);
3673 r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030);
3674 r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
3675 r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403);
3676 r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0067);
3677 r8168_mac_ocp_modify(tp, 0xc0ac, 0x0080, 0x1f00);
3678 r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f);
3679 r8168_mac_ocp_modify(tp, 0xe84c, 0x0000, 0x00c0);
3680 r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
3681 r8168_mac_ocp_modify(tp, 0xeb54, 0x0000, 0x0001);
3682 udelay(1);
3683 r8168_mac_ocp_modify(tp, 0xeb54, 0x0001, 0x0000);
3684 RTL_W16(tp, 0x1880, RTL_R16(tp, 0x1880) & ~0x0030);
3685
3686 r8168_mac_ocp_write(tp, 0xe098, 0xc302);
3687
3688 rtl_udelay_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
3689
3690 rtl8125_config_eee_mac(tp);
3691
3692 RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
3693 udelay(10);
3694}
3695
3696static void rtl_hw_start_8125_1(struct rtl8169_private *tp)
3697{
3698 static const struct ephy_info e_info_8125_1[] = {
3699 { 0x01, 0xffff, 0xa812 },
3700 { 0x09, 0xffff, 0x520c },
3701 { 0x04, 0xffff, 0xd000 },
3702 { 0x0d, 0xffff, 0xf702 },
3703 { 0x0a, 0xffff, 0x8653 },
3704 { 0x06, 0xffff, 0x001e },
3705 { 0x08, 0xffff, 0x3595 },
3706 { 0x20, 0xffff, 0x9455 },
3707 { 0x21, 0xffff, 0x99ff },
3708 { 0x02, 0xffff, 0x6046 },
3709 { 0x29, 0xffff, 0xfe00 },
3710 { 0x23, 0xffff, 0xab62 },
3711
3712 { 0x41, 0xffff, 0xa80c },
3713 { 0x49, 0xffff, 0x520c },
3714 { 0x44, 0xffff, 0xd000 },
3715 { 0x4d, 0xffff, 0xf702 },
3716 { 0x4a, 0xffff, 0x8653 },
3717 { 0x46, 0xffff, 0x001e },
3718 { 0x48, 0xffff, 0x3595 },
3719 { 0x60, 0xffff, 0x9455 },
3720 { 0x61, 0xffff, 0x99ff },
3721 { 0x42, 0xffff, 0x6046 },
3722 { 0x69, 0xffff, 0xfe00 },
3723 { 0x63, 0xffff, 0xab62 },
3724 };
3725
3726 rtl_set_def_aspm_entry_latency(tp);
3727
3728
3729 rtl_hw_aspm_clkreq_enable(tp, false);
3730 rtl_ephy_init(tp, e_info_8125_1);
3731
3732 rtl_hw_start_8125_common(tp);
3733}
3734
3735static void rtl_hw_start_8125_2(struct rtl8169_private *tp)
3736{
3737 static const struct ephy_info e_info_8125_2[] = {
3738 { 0x04, 0xffff, 0xd000 },
3739 { 0x0a, 0xffff, 0x8653 },
3740 { 0x23, 0xffff, 0xab66 },
3741 { 0x20, 0xffff, 0x9455 },
3742 { 0x21, 0xffff, 0x99ff },
3743 { 0x29, 0xffff, 0xfe04 },
3744
3745 { 0x44, 0xffff, 0xd000 },
3746 { 0x4a, 0xffff, 0x8653 },
3747 { 0x63, 0xffff, 0xab66 },
3748 { 0x60, 0xffff, 0x9455 },
3749 { 0x61, 0xffff, 0x99ff },
3750 { 0x69, 0xffff, 0xfe04 },
3751 };
3752
3753 rtl_set_def_aspm_entry_latency(tp);
3754
3755
3756 rtl_hw_aspm_clkreq_enable(tp, false);
3757 rtl_ephy_init(tp, e_info_8125_2);
3758
3759 rtl_hw_start_8125_common(tp);
3760}
3761
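/* Per-chip hardware setup hooks, indexed by MAC version. NULL entries
 * need no configuration beyond the common init path.
 */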
3762static void rtl_hw_config(struct rtl8169_private *tp)
3763{
3764 static const rtl_generic_fct hw_configs[] = {
3765 [RTL_GIGA_MAC_VER_07] = rtl_hw_start_8102e_1,
3766 [RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3,
3767 [RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
3768 [RTL_GIGA_MAC_VER_10] = NULL,
3769 [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b,
3770 [RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168b,
3771 [RTL_GIGA_MAC_VER_13] = NULL,
3772 [RTL_GIGA_MAC_VER_14] = NULL,
3773 [RTL_GIGA_MAC_VER_15] = NULL,
3774 [RTL_GIGA_MAC_VER_16] = NULL,
3775 [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b,
3776 [RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
3777 [RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1,
3778 [RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2,
3779 [RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_3,
3780 [RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4,
3781 [RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2,
3782 [RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3,
3783 [RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d,
3784 [RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d,
3785 [RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d,
3786 [RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4,
3787 [RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1,
3788 [RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2,
3789 [RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168d,
3790 [RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1,
3791 [RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1,
3792 [RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2,
3793 [RTL_GIGA_MAC_VER_35] = rtl_hw_start_8168f_1,
3794 [RTL_GIGA_MAC_VER_36] = rtl_hw_start_8168f_1,
3795 [RTL_GIGA_MAC_VER_37] = rtl_hw_start_8402,
3796 [RTL_GIGA_MAC_VER_38] = rtl_hw_start_8411,
3797 [RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106,
3798 [RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1,
3799 [RTL_GIGA_MAC_VER_41] = rtl_hw_start_8168g_1,
3800 [RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2,
3801 [RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2,
3802 [RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2,
3803 [RTL_GIGA_MAC_VER_45] = rtl_hw_start_8168h_1,
3804 [RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1,
3805 [RTL_GIGA_MAC_VER_47] = rtl_hw_start_8168h_1,
3806 [RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1,
3807 [RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1,
3808 [RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2,
3809 [RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
3810 [RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117,
3811 [RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125_1,
3812 [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125_2,
3813 };
3814
3815 if (hw_configs[tp->mac_version])
3816 hw_configs[tp->mac_version](tp);
3817}
3818
3819static void rtl_hw_start_8125(struct rtl8169_private *tp)
3820{
3821 int i;
3822
	/* disable interrupt coalescing */
3824 for (i = 0xa00; i < 0xb00; i += 4)
3825 RTL_W32(tp, i, 0);
3826
3827 rtl_hw_config(tp);
3828}
3829
3830static void rtl_hw_start_8168(struct rtl8169_private *tp)
3831{
3832 if (rtl_is_8168evl_up(tp))
3833 RTL_W8(tp, MaxTxPacketSize, EarlySize);
3834 else
3835 RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
3836
3837 rtl_hw_config(tp);
3838
	/* disable interrupt coalescing */
3840 RTL_W16(tp, IntrMitigate, 0x0000);
3841}
3842
3843static void rtl_hw_start_8169(struct rtl8169_private *tp)
3844{
3845 RTL_W8(tp, EarlyTxThres, NoEarlyTx);
3846
3847 tp->cp_cmd |= PCIMulRW;
3848
3849 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
3850 tp->mac_version == RTL_GIGA_MAC_VER_03)
3851 tp->cp_cmd |= EnAnaPLL;
3852
3853 RTL_W16(tp, CPlusCmd, tp->cp_cmd);
3854
3855 rtl8169_set_magic_reg(tp, tp->mac_version);
3856
	/* disable interrupt coalescing */
3858 RTL_W16(tp, IntrMitigate, 0x0000);
3859}
3860
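/* Common bring-up sequence: chip-specific configuration, ring and Rx/Tx
 * setup, then the receiver/transmitter and interrupts are enabled.
 */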
3861static void rtl_hw_start(struct rtl8169_private *tp)
3862{
3863 rtl_unlock_config_regs(tp);
3864
3865 tp->cp_cmd &= CPCMD_MASK;
3866 RTL_W16(tp, CPlusCmd, tp->cp_cmd);
3867
3868 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
3869 rtl_hw_start_8169(tp);
3870 else if (rtl_is_8125(tp))
3871 rtl_hw_start_8125(tp);
3872 else
3873 rtl_hw_start_8168(tp);
3874
3875 rtl_set_rx_max_size(tp);
3876 rtl_set_rx_tx_desc_registers(tp);
3877 rtl_lock_config_regs(tp);
3878
3879 rtl_jumbo_config(tp);
3880
	/* make sure the config writes reached the chip before enabling Tx/Rx */
3882 rtl_pci_commit(tp);
3883
3884 RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
3885 rtl_init_rxcfg(tp);
3886 rtl_set_tx_config_registers(tp);
3887 rtl_set_rx_mode(tp->dev);
3888 rtl_irq_enable(tp);
3889}
3890
3891static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
3892{
3893 struct rtl8169_private *tp = netdev_priv(dev);
3894
3895 dev->mtu = new_mtu;
3896 netdev_update_features(dev);
3897 rtl_jumbo_config(tp);
3898
3899 return 0;
3900}
3901
3902static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
3903{
3904 desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
3905 desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
3906}
3907
3908static inline void rtl8169_mark_to_asic(struct RxDesc *desc)
3909{
3910 u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
3911
3912 desc->opts2 = 0;
	/* Force memory writes to complete before releasing descriptor */
3914 dma_wmb();
3915
3916 desc->opts1 = cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE);
3917}
3918
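/* Rx buffers are full pages sized to hold R8169_RX_BUF_SIZE; map one for
 * DMA and hand its descriptor to the chip.
 */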
3919static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
3920 struct RxDesc *desc)
3921{
3922 struct device *d = tp_to_dev(tp);
3923 int node = dev_to_node(d);
3924 dma_addr_t mapping;
3925 struct page *data;
3926
3927 data = alloc_pages_node(node, GFP_KERNEL, get_order(R8169_RX_BUF_SIZE));
3928 if (!data)
3929 return NULL;
3930
3931 mapping = dma_map_page(d, data, 0, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
3932 if (unlikely(dma_mapping_error(d, mapping))) {
3933 if (net_ratelimit())
3934 netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
3935 __free_pages(data, get_order(R8169_RX_BUF_SIZE));
3936 return NULL;
3937 }
3938
3939 desc->addr = cpu_to_le64(mapping);
3940 rtl8169_mark_to_asic(desc);
3941
3942 return data;
3943}
3944
3945static void rtl8169_rx_clear(struct rtl8169_private *tp)
3946{
3947 unsigned int i;
3948
3949 for (i = 0; i < NUM_RX_DESC && tp->Rx_databuff[i]; i++) {
3950 dma_unmap_page(tp_to_dev(tp),
3951 le64_to_cpu(tp->RxDescArray[i].addr),
3952 R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
3953 __free_pages(tp->Rx_databuff[i], get_order(R8169_RX_BUF_SIZE));
3954 tp->Rx_databuff[i] = NULL;
3955 rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
3956 }
3957}
3958
3959static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
3960{
3961 desc->opts1 |= cpu_to_le32(RingEnd);
3962}
3963
3964static int rtl8169_rx_fill(struct rtl8169_private *tp)
3965{
3966 unsigned int i;
3967
3968 for (i = 0; i < NUM_RX_DESC; i++) {
3969 struct page *data;
3970
3971 data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
3972 if (!data) {
3973 rtl8169_rx_clear(tp);
3974 return -ENOMEM;
3975 }
3976 tp->Rx_databuff[i] = data;
3977 }
3978
3979 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
3980
3981 return 0;
3982}
3983
3984static int rtl8169_init_ring(struct rtl8169_private *tp)
3985{
3986 rtl8169_init_ring_indexes(tp);
3987
3988 memset(tp->tx_skb, 0, sizeof(tp->tx_skb));
3989 memset(tp->Rx_databuff, 0, sizeof(tp->Rx_databuff));
3990
3991 return rtl8169_rx_fill(tp);
3992}
3993
3994static void rtl8169_unmap_tx_skb(struct rtl8169_private *tp, unsigned int entry)
3995{
3996 struct ring_info *tx_skb = tp->tx_skb + entry;
3997 struct TxDesc *desc = tp->TxDescArray + entry;
3998
3999 dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), tx_skb->len,
4000 DMA_TO_DEVICE);
4001 memset(desc, 0, sizeof(*desc));
4002 memset(tx_skb, 0, sizeof(*tx_skb));
4003}
4004
4005static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
4006 unsigned int n)
4007{
4008 unsigned int i;
4009
4010 for (i = 0; i < n; i++) {
4011 unsigned int entry = (start + i) % NUM_TX_DESC;
4012 struct ring_info *tx_skb = tp->tx_skb + entry;
4013 unsigned int len = tx_skb->len;
4014
4015 if (len) {
4016 struct sk_buff *skb = tx_skb->skb;
4017
4018 rtl8169_unmap_tx_skb(tp, entry);
4019 if (skb)
4020 dev_consume_skb_any(skb);
4021 }
4022 }
4023}
4024
4025static void rtl8169_tx_clear(struct rtl8169_private *tp)
4026{
4027 rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
4028 tp->cur_tx = tp->dirty_tx = 0;
4029 netdev_reset_queue(tp->dev);
4030}
4031
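/* Software-driven recovery: quiesce NAPI and the Tx queue, reset the chip,
 * drop pending Tx, re-arm the Rx descriptors and restart. Runs from the
 * work item scheduled e.g. by the Tx timeout handler.
 */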
4032static void rtl_reset_work(struct rtl8169_private *tp)
4033{
4034 struct net_device *dev = tp->dev;
4035 int i;
4036
4037 napi_disable(&tp->napi);
4038 netif_stop_queue(dev);
4039 synchronize_rcu();
4040
4041 rtl8169_hw_reset(tp);
4042
4043 for (i = 0; i < NUM_RX_DESC; i++)
4044 rtl8169_mark_to_asic(tp->RxDescArray + i);
4045
4046 rtl8169_tx_clear(tp);
4047 rtl8169_init_ring_indexes(tp);
4048
4049 napi_enable(&tp->napi);
4050 rtl_hw_start(tp);
4051 netif_wake_queue(dev);
4052}
4053
4054static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue)
4055{
4056 struct rtl8169_private *tp = netdev_priv(dev);
4057
4058 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
4059}
4060
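/* Map one Tx buffer and fill its descriptor. DescOwn is set only when
 * requested so that the first descriptor of a frame can be handed to the
 * chip last.
 */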
4061static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len,
4062 void *addr, unsigned int entry, bool desc_own)
4063{
4064 struct TxDesc *txd = tp->TxDescArray + entry;
4065 struct device *d = tp_to_dev(tp);
4066 dma_addr_t mapping;
4067 u32 opts1;
4068 int ret;
4069
4070 mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
4071 ret = dma_mapping_error(d, mapping);
4072 if (unlikely(ret)) {
4073 if (net_ratelimit())
4074 netif_err(tp, drv, tp->dev, "Failed to map TX data!\n");
4075 return ret;
4076 }
4077
4078 txd->addr = cpu_to_le64(mapping);
4079 txd->opts2 = cpu_to_le32(opts[1]);
4080
4081 opts1 = opts[0] | len;
4082 if (entry == NUM_TX_DESC - 1)
4083 opts1 |= RingEnd;
4084 if (desc_own)
4085 opts1 |= DescOwn;
4086 txd->opts1 = cpu_to_le32(opts1);
4087
4088 tp->tx_skb[entry].len = len;
4089
4090 return 0;
4091}
4092
4093static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
4094 const u32 *opts, unsigned int entry)
4095{
4096 struct skb_shared_info *info = skb_shinfo(skb);
4097 unsigned int cur_frag;
4098
4099 for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
4100 const skb_frag_t *frag = info->frags + cur_frag;
4101 void *addr = skb_frag_address(frag);
4102 u32 len = skb_frag_size(frag);
4103
4104 entry = (entry + 1) % NUM_TX_DESC;
4105
4106 if (unlikely(rtl8169_tx_map(tp, opts, len, addr, entry, true)))
4107 goto err_out;
4108 }
4109
4110 return 0;
4111
4112err_out:
4113 rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
4114 return -EIO;
4115}
4116
4117static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
4118{
4119 return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
4120}
4121
4122static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
4123{
4124 u32 mss = skb_shinfo(skb)->gso_size;
4125
4126 if (mss) {
4127 opts[0] |= TD_LSO;
4128 opts[0] |= mss << TD0_MSS_SHIFT;
4129 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
4130 const struct iphdr *ip = ip_hdr(skb);
4131
4132 if (ip->protocol == IPPROTO_TCP)
4133 opts[0] |= TD0_IP_CS | TD0_TCP_CS;
4134 else if (ip->protocol == IPPROTO_UDP)
4135 opts[0] |= TD0_IP_CS | TD0_UDP_CS;
4136 else
4137 WARN_ON_ONCE(1);
4138 }
4139}
4140
4141static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
4142 struct sk_buff *skb, u32 *opts)
4143{
4144 u32 transport_offset = (u32)skb_transport_offset(skb);
4145 u32 mss = skb_shinfo(skb)->gso_size;
4146
4147 if (mss) {
4148 switch (vlan_get_protocol(skb)) {
4149 case htons(ETH_P_IP):
4150 opts[0] |= TD1_GTSENV4;
4151 break;
4152
4153 case htons(ETH_P_IPV6):
4154 if (skb_cow_head(skb, 0))
4155 return false;
4156
4157 tcp_v6_gso_csum_prep(skb);
4158 opts[0] |= TD1_GTSENV6;
4159 break;
4160
4161 default:
4162 WARN_ON_ONCE(1);
4163 break;
4164 }
4165
4166 opts[0] |= transport_offset << GTTCPHO_SHIFT;
4167 opts[1] |= mss << TD1_MSS_SHIFT;
4168 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
4169 u8 ip_protocol;
4170
4171 switch (vlan_get_protocol(skb)) {
4172 case htons(ETH_P_IP):
4173 opts[1] |= TD1_IPv4_CS;
4174 ip_protocol = ip_hdr(skb)->protocol;
4175 break;
4176
4177 case htons(ETH_P_IPV6):
4178 opts[1] |= TD1_IPv6_CS;
4179 ip_protocol = ipv6_hdr(skb)->nexthdr;
4180 break;
4181
4182 default:
4183 ip_protocol = IPPROTO_RAW;
4184 break;
4185 }
4186
4187 if (ip_protocol == IPPROTO_TCP)
4188 opts[1] |= TD1_TCP_CS;
4189 else if (ip_protocol == IPPROTO_UDP)
4190 opts[1] |= TD1_UDP_CS;
4191 else
4192 WARN_ON_ONCE(1);
4193
4194 opts[1] |= transport_offset << TCPHO_SHIFT;
4195 } else {
4196 if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
4197 return !eth_skb_pad(skb);
4198 }
4199
4200 return true;
4201}
4202
4203static bool rtl_tx_slots_avail(struct rtl8169_private *tp,
4204 unsigned int nr_frags)
4205{
4206 unsigned int slots_avail = tp->dirty_tx + NUM_TX_DESC - tp->cur_tx;
4207
4208
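	/* A skbuff with nr_frags needs nr_frags + 1 entries in the Tx ring,
	 * hence the strict inequality below.
	 */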
4209 return slots_avail > nr_frags;
4210}
4211
4212
4213static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp)
4214{
4215 switch (tp->mac_version) {
4216 case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
4217 case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
4218 return false;
4219 default:
4220 return true;
4221 }
4222}
4223
4224static void rtl8169_doorbell(struct rtl8169_private *tp)
4225{
4226 if (rtl_is_8125(tp))
4227 RTL_W16(tp, TxPoll_8125, BIT(0));
4228 else
4229 RTL_W8(tp, TxPoll, NPQ);
4230}
4231
4232static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
4233 struct net_device *dev)
4234{
4235 unsigned int frags = skb_shinfo(skb)->nr_frags;
4236 struct rtl8169_private *tp = netdev_priv(dev);
4237 unsigned int entry = tp->cur_tx % NUM_TX_DESC;
4238 struct TxDesc *txd_first, *txd_last;
4239 bool stop_queue, door_bell;
4240 u32 opts[2];
4241
4242 txd_first = tp->TxDescArray + entry;
4243
4244 if (unlikely(!rtl_tx_slots_avail(tp, frags))) {
4245 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
4246 goto err_stop_0;
4247 }
4248
4249 if (unlikely(le32_to_cpu(txd_first->opts1) & DescOwn))
4250 goto err_stop_0;
4251
4252 opts[1] = rtl8169_tx_vlan_tag(skb);
4253 opts[0] = 0;
4254
4255 if (!rtl_chip_supports_csum_v2(tp))
4256 rtl8169_tso_csum_v1(skb, opts);
4257 else if (!rtl8169_tso_csum_v2(tp, skb, opts))
4258 goto err_dma_0;
4259
4260 if (unlikely(rtl8169_tx_map(tp, opts, skb_headlen(skb), skb->data,
4261 entry, false)))
4262 goto err_dma_0;
4263
4264 if (frags) {
4265 if (rtl8169_xmit_frags(tp, skb, opts, entry))
4266 goto err_dma_1;
4267 entry = (entry + frags) % NUM_TX_DESC;
4268 }
4269
4270 txd_last = tp->TxDescArray + entry;
4271 txd_last->opts1 |= cpu_to_le32(LastFrag);
4272 tp->tx_skb[entry].skb = skb;
4273
4274 skb_tx_timestamp(skb);
4275
4276
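	/* Force memory writes to complete before releasing descriptor */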
4277 dma_wmb();
4278
4279 door_bell = __netdev_sent_queue(dev, skb->len, netdev_xmit_more());
4280
4281 txd_first->opts1 |= cpu_to_le32(DescOwn | FirstFrag);
4282
4283
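	/* Force all memory writes to complete before notifying device */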
4284 wmb();
4285
4286 tp->cur_tx += frags + 1;
4287
4288 stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS);
4289 if (unlikely(stop_queue)) {
4290
4291
4292
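		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
		 * not miss a ring update when it notices a stopped queue.
		 */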
4293 smp_wmb();
4294 netif_stop_queue(dev);
4295 door_bell = true;
4296 }
4297
4298 if (door_bell)
4299 rtl8169_doorbell(tp);
4300
4301 if (unlikely(stop_queue)) {
4302
4303
4304
4305
4306
4307
4308
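		/* Sync with rtl_tx:
		 * - publish queue status and cur_tx ring index (write barrier)
		 * - refresh dirty_tx ring index (read barrier).
		 * May the current thread have a pessimistic view of the ring
		 * status and forget to wake up the queue, a racing rtl_tx
		 * thread can't.
		 */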
4309 smp_mb();
4310 if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS))
4311 netif_start_queue(dev);
4312 }
4313
4314 return NETDEV_TX_OK;
4315
4316err_dma_1:
4317 rtl8169_unmap_tx_skb(tp, entry);
4318err_dma_0:
4319 dev_kfree_skb_any(skb);
4320 dev->stats.tx_dropped++;
4321 return NETDEV_TX_OK;
4322
4323err_stop_0:
4324 netif_stop_queue(dev);
4325 dev->stats.tx_dropped++;
4326 return NETDEV_TX_BUSY;
4327}
4328
4329static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
4330 struct net_device *dev,
4331 netdev_features_t features)
4332{
4333 int transport_offset = skb_transport_offset(skb);
4334 struct rtl8169_private *tp = netdev_priv(dev);
4335
4336 if (skb_is_gso(skb)) {
4337 if (transport_offset > GTTCPHO_MAX &&
4338 rtl_chip_supports_csum_v2(tp))
4339 features &= ~NETIF_F_ALL_TSO;
4340 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
4341 if (skb->len < ETH_ZLEN) {
4342 switch (tp->mac_version) {
4343 case RTL_GIGA_MAC_VER_11:
4344 case RTL_GIGA_MAC_VER_12:
4345 case RTL_GIGA_MAC_VER_17:
4346 case RTL_GIGA_MAC_VER_34:
4347 features &= ~NETIF_F_CSUM_MASK;
4348 break;
4349 default:
4350 break;
4351 }
4352 }
4353
4354 if (transport_offset > TCPHO_MAX &&
4355 rtl_chip_supports_csum_v2(tp))
4356 features &= ~NETIF_F_CSUM_MASK;
4357 }
4358
4359 return vlan_features_check(skb, features);
4360}
4361
4362static void rtl8169_pcierr_interrupt(struct net_device *dev)
4363{
4364 struct rtl8169_private *tp = netdev_priv(dev);
4365 struct pci_dev *pdev = tp->pci_dev;
4366 int pci_status_errs;
4367 u16 pci_cmd;
4368
4369 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
4370
4371 pci_status_errs = pci_status_get_and_clear_errors(pdev);
4372
4373 netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n",
4374 pci_cmd, pci_status_errs);
4375
4376
4377
4378
4379
4380
4381
4382
4383
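	/* Best-effort recovery: disable parity checking on devices with known
	 * broken parity reporting, otherwise re-enable SERR and parity error
	 * reporting, then schedule a chip reset.
	 */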
4384 if (pdev->broken_parity_status)
4385 pci_cmd &= ~PCI_COMMAND_PARITY;
4386 else
4387 pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
4388
4389 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
4390
4391 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
4392}
4393
4394static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
4395 int budget)
4396{
4397 unsigned int dirty_tx, tx_left, bytes_compl = 0, pkts_compl = 0;
4398
4399 dirty_tx = tp->dirty_tx;
4400 smp_rmb();
4401
4402 for (tx_left = tp->cur_tx - dirty_tx; tx_left > 0; tx_left--) {
4403 unsigned int entry = dirty_tx % NUM_TX_DESC;
4404 struct sk_buff *skb = tp->tx_skb[entry].skb;
4405 u32 status;
4406
4407 status = le32_to_cpu(tp->TxDescArray[entry].opts1);
4408 if (status & DescOwn)
4409 break;
4410
4411 rtl8169_unmap_tx_skb(tp, entry);
4412
4413 if (skb) {
4414 pkts_compl++;
4415 bytes_compl += skb->len;
4416 napi_consume_skb(skb, budget);
4417 }
4418 dirty_tx++;
4419 }
4420
4421 if (tp->dirty_tx != dirty_tx) {
4422 netdev_completed_queue(dev, pkts_compl, bytes_compl);
4423
4424 u64_stats_update_begin(&tp->tx_stats.syncp);
4425 tp->tx_stats.packets += pkts_compl;
4426 tp->tx_stats.bytes += bytes_compl;
4427 u64_stats_update_end(&tp->tx_stats.syncp);
4428
4429 tp->dirty_tx = dirty_tx;
4430
4431
4432
4433
4434
4435
4436
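		/* Sync with rtl8169_start_xmit:
		 * - publish dirty_tx ring index (write barrier)
		 * - refresh cur_tx ring index and queue status (read barrier).
		 * May the current thread miss a stopped queue condition,
		 * a racing xmit thread can only have a right view of the
		 * ring status.
		 */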
4437 smp_mb();
4438 if (netif_queue_stopped(dev) &&
4439 rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
4440 netif_wake_queue(dev);
4441 }
4442
4443
4444
4445
4446
4447
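		/* TxPoll requests issued too closely together can be lost on
		 * 8168 chips, so kick the doorbell again if new packets were
		 * queued while completions were being processed.
		 */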
4448 if (tp->cur_tx != dirty_tx)
4449 rtl8169_doorbell(tp);
4450 }
4451}
4452
4453static inline int rtl8169_fragmented_frame(u32 status)
4454{
4455 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
4456}
4457
4458static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
4459{
4460 u32 status = opts1 & RxProtoMask;
4461
4462 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
4463 ((status == RxProtoUDP) && !(opts1 & UDPFail)))
4464 skb->ip_summed = CHECKSUM_UNNECESSARY;
4465 else
4466 skb_checksum_none_assert(skb);
4467}
4468
4469static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
4470{
4471 unsigned int cur_rx, rx_left;
4472 unsigned int count;
4473
4474 cur_rx = tp->cur_rx;
4475
4476 for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
4477 unsigned int entry = cur_rx % NUM_RX_DESC;
4478 const void *rx_buf = page_address(tp->Rx_databuff[entry]);
4479 struct RxDesc *desc = tp->RxDescArray + entry;
4480 u32 status;
4481
4482 status = le32_to_cpu(desc->opts1);
4483 if (status & DescOwn)
4484 break;
4485
4486
4487
4488
4489
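		/* This barrier is needed to keep us from reading
		 * any other fields out of the Rx descriptor until
		 * we know the status of DescOwn.
		 */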
4490 dma_rmb();
4491
4492 if (unlikely(status & RxRES)) {
4493 netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
4494 status);
4495 dev->stats.rx_errors++;
4496 if (status & (RxRWT | RxRUNT))
4497 dev->stats.rx_length_errors++;
4498 if (status & RxCRC)
4499 dev->stats.rx_crc_errors++;
4500 if (status & (RxRUNT | RxCRC) && !(status & RxRWT) &&
4501 dev->features & NETIF_F_RXALL) {
4502 goto process_pkt;
4503 }
4504 } else {
4505 unsigned int pkt_size;
4506 struct sk_buff *skb;
4507
4508process_pkt:
4509 pkt_size = status & GENMASK(13, 0);
4510 if (likely(!(dev->features & NETIF_F_RXFCS)))
4511 pkt_size -= ETH_FCS_LEN;
4512
4513
4514
4515
4516
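			/* The driver does not support incoming fragmented
			 * frames. They are seen as a symptom of over-mtu
			 * sized frames.
			 */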
4517 if (unlikely(rtl8169_fragmented_frame(status))) {
4518 dev->stats.rx_dropped++;
4519 dev->stats.rx_length_errors++;
4520 goto release_descriptor;
4521 }
4522
4523 skb = napi_alloc_skb(&tp->napi, pkt_size);
4524 if (unlikely(!skb)) {
4525 dev->stats.rx_dropped++;
4526 goto release_descriptor;
4527 }
4528
4529 dma_sync_single_for_cpu(tp_to_dev(tp),
4530 le64_to_cpu(desc->addr),
4531 pkt_size, DMA_FROM_DEVICE);
4532 prefetch(rx_buf);
4533 skb_copy_to_linear_data(skb, rx_buf, pkt_size);
4534 skb->tail += pkt_size;
4535 skb->len = pkt_size;
4536
4537 dma_sync_single_for_device(tp_to_dev(tp),
4538 le64_to_cpu(desc->addr),
4539 pkt_size, DMA_FROM_DEVICE);
4540
4541 rtl8169_rx_csum(skb, status);
4542 skb->protocol = eth_type_trans(skb, dev);
4543
4544 rtl8169_rx_vlan_tag(desc, skb);
4545
4546 if (skb->pkt_type == PACKET_MULTICAST)
4547 dev->stats.multicast++;
4548
4549 napi_gro_receive(&tp->napi, skb);
4550
4551 u64_stats_update_begin(&tp->rx_stats.syncp);
4552 tp->rx_stats.packets++;
4553 tp->rx_stats.bytes += pkt_size;
4554 u64_stats_update_end(&tp->rx_stats.syncp);
4555 }
4556release_descriptor:
4557 rtl8169_mark_to_asic(desc);
4558 }
4559
4560 count = cur_rx - tp->cur_rx;
4561 tp->cur_rx = cur_rx;
4562
4563 return count;
4564}
4565
4566static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4567{
4568 struct rtl8169_private *tp = dev_instance;
4569 u32 status = rtl_get_events(tp);
4570
4571 if (!tp->irq_enabled || (status & 0xffff) == 0xffff ||
4572 !(status & tp->irq_mask))
4573 return IRQ_NONE;
4574
4575 if (unlikely(status & SYSErr)) {
4576 rtl8169_pcierr_interrupt(tp->dev);
4577 goto out;
4578 }
4579
4580 if (status & LinkChg)
4581 phy_mac_interrupt(tp->phydev);
4582
4583 if (unlikely(status & RxFIFOOver &&
4584 tp->mac_version == RTL_GIGA_MAC_VER_11)) {
4585 netif_stop_queue(tp->dev);
4586 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
4587 }
4588
4589 rtl_irq_disable(tp);
4590 napi_schedule_irqoff(&tp->napi);
4591out:
4592 rtl_ack_events(tp, status);
4593
4594 return IRQ_HANDLED;
4595}
4596
4597static void rtl_task(struct work_struct *work)
4598{
4599 struct rtl8169_private *tp =
4600 container_of(work, struct rtl8169_private, wk.work);
4601
4602 rtl_lock_work(tp);
4603
4604 if (!netif_running(tp->dev) ||
4605 !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
4606 goto out_unlock;
4607
4608 if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags))
4609 rtl_reset_work(tp);
4610out_unlock:
4611 rtl_unlock_work(tp);
4612}
4613
4614static int rtl8169_poll(struct napi_struct *napi, int budget)
4615{
4616 struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
4617 struct net_device *dev = tp->dev;
4618 int work_done;
4619
4620 work_done = rtl_rx(dev, tp, (u32) budget);
4621
4622 rtl_tx(dev, tp, budget);
4623
4624 if (work_done < budget) {
4625 napi_complete_done(napi, work_done);
4626 rtl_irq_enable(tp);
4627 }
4628
4629 return work_done;
4630}
4631
4632static void r8169_phylink_handler(struct net_device *ndev)
4633{
4634 struct rtl8169_private *tp = netdev_priv(ndev);
4635
4636 if (netif_carrier_ok(ndev)) {
4637 rtl_link_chg_patch(tp);
4638 pm_request_resume(&tp->pci_dev->dev);
4639 } else {
4640 pm_runtime_idle(&tp->pci_dev->dev);
4641 }
4642
4643 if (net_ratelimit())
4644 phy_print_status(tp->phydev);
4645}
4646
4647static int r8169_phy_connect(struct rtl8169_private *tp)
4648{
4649 struct phy_device *phydev = tp->phydev;
4650 phy_interface_t phy_mode;
4651 int ret;
4652
4653 phy_mode = tp->supports_gmii ? PHY_INTERFACE_MODE_GMII :
4654 PHY_INTERFACE_MODE_MII;
4655
4656 ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler,
4657 phy_mode);
4658 if (ret)
4659 return ret;
4660
4661 if (!tp->supports_gmii)
4662 phy_set_max_speed(phydev, SPEED_100);
4663
4664 phy_support_asym_pause(phydev);
4665
4666 phy_attached_info(phydev);
4667
4668 return 0;
4669}
4670
4671static void rtl8169_down(struct net_device *dev)
4672{
4673 struct rtl8169_private *tp = netdev_priv(dev);
4674
4675 phy_stop(tp->phydev);
4676
4677 napi_disable(&tp->napi);
4678 netif_stop_queue(dev);
4679
4680 rtl8169_hw_reset(tp);
4681
4682
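	/* Give a racing hard_start_xmit a few cycles to complete. */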
4683 synchronize_rcu();
4684
4685 rtl8169_tx_clear(tp);
4686
4687 rtl8169_rx_clear(tp);
4688
4689 rtl_pll_power_down(tp);
4690}
4691
4692static int rtl8169_close(struct net_device *dev)
4693{
4694 struct rtl8169_private *tp = netdev_priv(dev);
4695 struct pci_dev *pdev = tp->pci_dev;
4696
4697 pm_runtime_get_sync(&pdev->dev);
4698
4699
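	/* Update counters before going down */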
4700 rtl8169_update_counters(tp);
4701
4702 rtl_lock_work(tp);
4703
4704 bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
4705
4706 rtl8169_down(dev);
4707 rtl_unlock_work(tp);
4708
4709 cancel_work_sync(&tp->wk.work);
4710
4711 phy_disconnect(tp->phydev);
4712
4713 pci_free_irq(pdev, 0, tp);
4714
4715 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
4716 tp->RxPhyAddr);
4717 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
4718 tp->TxPhyAddr);
4719 tp->TxDescArray = NULL;
4720 tp->RxDescArray = NULL;
4721
4722 pm_runtime_put_sync(&pdev->dev);
4723
4724 return 0;
4725}
4726
4727#ifdef CONFIG_NET_POLL_CONTROLLER
4728static void rtl8169_netpoll(struct net_device *dev)
4729{
4730 struct rtl8169_private *tp = netdev_priv(dev);
4731
4732 rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp);
4733}
4734#endif
4735
4736static int rtl_open(struct net_device *dev)
4737{
4738 struct rtl8169_private *tp = netdev_priv(dev);
4739 struct pci_dev *pdev = tp->pci_dev;
4740 int retval = -ENOMEM;
4741
4742 pm_runtime_get_sync(&pdev->dev);
4743
4744
4745
4746
4747
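	/* Rx and Tx descriptors need 256-byte alignment;
	 * dma_alloc_coherent() provides more than that.
	 */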
4748 tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
4749 &tp->TxPhyAddr, GFP_KERNEL);
4750 if (!tp->TxDescArray)
4751 goto err_pm_runtime_put;
4752
4753 tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
4754 &tp->RxPhyAddr, GFP_KERNEL);
4755 if (!tp->RxDescArray)
4756 goto err_free_tx_0;
4757
4758 retval = rtl8169_init_ring(tp);
4759 if (retval < 0)
4760 goto err_free_rx_1;
4761
4762 rtl_request_firmware(tp);
4763
4764 retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp,
4765 dev->name);
4766 if (retval < 0)
4767 goto err_release_fw_2;
4768
4769 retval = r8169_phy_connect(tp);
4770 if (retval)
4771 goto err_free_irq;
4772
4773 rtl_lock_work(tp);
4774
4775 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
4776
4777 napi_enable(&tp->napi);
4778
4779 rtl8169_init_phy(tp);
4780
4781 rtl_pll_power_up(tp);
4782
4783 rtl_hw_start(tp);
4784
4785 if (!rtl8169_init_counter_offsets(tp))
4786 netif_warn(tp, hw, dev, "counter reset/update failed\n");
4787
4788 phy_start(tp->phydev);
4789 netif_start_queue(dev);
4790
4791 rtl_unlock_work(tp);
4792
4793 pm_runtime_put_sync(&pdev->dev);
4794out:
4795 return retval;
4796
4797err_free_irq:
4798 pci_free_irq(pdev, 0, tp);
4799err_release_fw_2:
4800 rtl_release_firmware(tp);
4801 rtl8169_rx_clear(tp);
4802err_free_rx_1:
4803 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
4804 tp->RxPhyAddr);
4805 tp->RxDescArray = NULL;
4806err_free_tx_0:
4807 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
4808 tp->TxPhyAddr);
4809 tp->TxDescArray = NULL;
4810err_pm_runtime_put:
4811 pm_runtime_put_noidle(&pdev->dev);
4812 goto out;
4813}
4814
4815static void
4816rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
4817{
4818 struct rtl8169_private *tp = netdev_priv(dev);
4819 struct pci_dev *pdev = tp->pci_dev;
4820 struct rtl8169_counters *counters = tp->counters;
4821 unsigned int start;
4822
4823 pm_runtime_get_noresume(&pdev->dev);
4824
4825 netdev_stats_to_stats64(stats, &dev->stats);
4826
4827 do {
4828 start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
4829 stats->rx_packets = tp->rx_stats.packets;
4830 stats->rx_bytes = tp->rx_stats.bytes;
4831 } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
4832
4833 do {
4834 start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
4835 stats->tx_packets = tp->tx_stats.packets;
4836 stats->tx_bytes = tp->tx_stats.bytes;
4837 } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
4838
4839
4840
4841
4842
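	/* Fetch additional counter values missing in stats collected by driver
	 * from tally counters.
	 */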
4843 if (pm_runtime_active(&pdev->dev))
4844 rtl8169_update_counters(tp);
4845
4846
4847
4848
4849
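	/* Subtract the counter snapshot taken at init time (see
	 * rtl8169_init_counter_offsets) so the values cover only the
	 * current "up" period.
	 */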
4850 stats->tx_errors = le64_to_cpu(counters->tx_errors) -
4851 le64_to_cpu(tp->tc_offset.tx_errors);
4852 stats->collisions = le32_to_cpu(counters->tx_multi_collision) -
4853 le32_to_cpu(tp->tc_offset.tx_multi_collision);
4854 stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
4855 le16_to_cpu(tp->tc_offset.tx_aborted);
4856 stats->rx_missed_errors = le16_to_cpu(counters->rx_missed) -
4857 le16_to_cpu(tp->tc_offset.rx_missed);
4858
4859 pm_runtime_put_noidle(&pdev->dev);
4860}
4861
4862static void rtl8169_net_suspend(struct net_device *dev)
4863{
4864 struct rtl8169_private *tp = netdev_priv(dev);
4865
4866 if (!netif_running(dev))
4867 return;
4868
4869 phy_stop(tp->phydev);
4870 netif_device_detach(dev);
4871
4872 rtl_lock_work(tp);
4873 napi_disable(&tp->napi);
4874
4875 bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
4876
4877 rtl_unlock_work(tp);
4878
4879 rtl_pll_power_down(tp);
4880}
4881
4882#ifdef CONFIG_PM
4883
4884static int rtl8169_suspend(struct device *device)
4885{
4886 struct net_device *dev = dev_get_drvdata(device);
4887 struct rtl8169_private *tp = netdev_priv(dev);
4888
4889 rtl8169_net_suspend(dev);
4890 clk_disable_unprepare(tp->clk);
4891
4892 return 0;
4893}
4894
4895static void __rtl8169_resume(struct net_device *dev)
4896{
4897 struct rtl8169_private *tp = netdev_priv(dev);
4898
4899 netif_device_attach(dev);
4900
4901 rtl_pll_power_up(tp);
4902 rtl8169_init_phy(tp);
4903
4904 phy_start(tp->phydev);
4905
4906 rtl_lock_work(tp);
4907 napi_enable(&tp->napi);
4908 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
4909 rtl_reset_work(tp);
4910 rtl_unlock_work(tp);
4911}
4912
4913static int rtl8169_resume(struct device *device)
4914{
4915 struct net_device *dev = dev_get_drvdata(device);
4916 struct rtl8169_private *tp = netdev_priv(dev);
4917
4918 rtl_rar_set(tp, dev->dev_addr);
4919
4920 clk_prepare_enable(tp->clk);
4921
4922 if (netif_running(dev))
4923 __rtl8169_resume(dev);
4924
4925 return 0;
4926}
4927
4928static int rtl8169_runtime_suspend(struct device *device)
4929{
4930 struct net_device *dev = dev_get_drvdata(device);
4931 struct rtl8169_private *tp = netdev_priv(dev);
4932
4933 if (!tp->TxDescArray)
4934 return 0;
4935
4936 rtl_lock_work(tp);
4937 __rtl8169_set_wol(tp, WAKE_ANY);
4938 rtl_unlock_work(tp);
4939
4940 rtl8169_net_suspend(dev);
4941
4942
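	/* Update counters before going runtime suspend */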
4943 rtl8169_update_counters(tp);
4944
4945 return 0;
4946}
4947
4948static int rtl8169_runtime_resume(struct device *device)
4949{
4950 struct net_device *dev = dev_get_drvdata(device);
4951 struct rtl8169_private *tp = netdev_priv(dev);
4952
4953 rtl_rar_set(tp, dev->dev_addr);
4954
4955 if (!tp->TxDescArray)
4956 return 0;
4957
4958 rtl_lock_work(tp);
4959 __rtl8169_set_wol(tp, tp->saved_wolopts);
4960 rtl_unlock_work(tp);
4961
4962 __rtl8169_resume(dev);
4963
4964 return 0;
4965}
4966
4967static int rtl8169_runtime_idle(struct device *device)
4968{
4969 struct net_device *dev = dev_get_drvdata(device);
4970
4971 if (!netif_running(dev) || !netif_carrier_ok(dev))
4972 pm_schedule_suspend(device, 10000);
4973
4974 return -EBUSY;
4975}
4976
4977static const struct dev_pm_ops rtl8169_pm_ops = {
4978 .suspend = rtl8169_suspend,
4979 .resume = rtl8169_resume,
4980 .freeze = rtl8169_suspend,
4981 .thaw = rtl8169_resume,
4982 .poweroff = rtl8169_suspend,
4983 .restore = rtl8169_resume,
4984 .runtime_suspend = rtl8169_runtime_suspend,
4985 .runtime_resume = rtl8169_runtime_resume,
4986 .runtime_idle = rtl8169_runtime_idle,
4987};
4988
4989#define RTL8169_PM_OPS (&rtl8169_pm_ops)
4990
4991#else
4992
4993#define RTL8169_PM_OPS NULL
4994
4995#endif
4996
4997static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
4998{
4999
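	/* WoL fails with 8168b when the receiver is disabled. */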
5000 switch (tp->mac_version) {
5001 case RTL_GIGA_MAC_VER_11:
5002 case RTL_GIGA_MAC_VER_12:
5003 case RTL_GIGA_MAC_VER_17:
5004 pci_clear_master(tp->pci_dev);
5005
5006 RTL_W8(tp, ChipCmd, CmdRxEnb);
5007 rtl_pci_commit(tp);
5008 break;
5009 default:
5010 break;
5011 }
5012}
5013
5014static void rtl_shutdown(struct pci_dev *pdev)
5015{
5016 struct net_device *dev = pci_get_drvdata(pdev);
5017 struct rtl8169_private *tp = netdev_priv(dev);
5018
5019 rtl8169_net_suspend(dev);
5020
5021
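	/* Restore the original MAC address */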
5022 rtl_rar_set(tp, dev->perm_addr);
5023
5024 rtl8169_hw_reset(tp);
5025
5026 if (system_state == SYSTEM_POWER_OFF) {
5027 if (tp->saved_wolopts) {
5028 rtl_wol_suspend_quirk(tp);
5029 rtl_wol_shutdown_quirk(tp);
5030 }
5031
5032 pci_wake_from_d3(pdev, true);
5033 pci_set_power_state(pdev, PCI_D3hot);
5034 }
5035}
5036
5037static void rtl_remove_one(struct pci_dev *pdev)
5038{
5039 struct net_device *dev = pci_get_drvdata(pdev);
5040 struct rtl8169_private *tp = netdev_priv(dev);
5041
5042 if (r8168_check_dash(tp))
5043 rtl8168_driver_stop(tp);
5044
5045 netif_napi_del(&tp->napi);
5046
5047 unregister_netdev(dev);
5048 mdiobus_unregister(tp->phydev->mdio.bus);
5049
5050 rtl_release_firmware(tp);
5051
5052 if (pci_dev_run_wake(pdev))
5053 pm_runtime_get_noresume(&pdev->dev);
5054
5055
5056 rtl_rar_set(tp, dev->perm_addr);
5057}
5058
5059static const struct net_device_ops rtl_netdev_ops = {
5060 .ndo_open = rtl_open,
5061 .ndo_stop = rtl8169_close,
5062 .ndo_get_stats64 = rtl8169_get_stats64,
5063 .ndo_start_xmit = rtl8169_start_xmit,
5064 .ndo_features_check = rtl8169_features_check,
5065 .ndo_tx_timeout = rtl8169_tx_timeout,
5066 .ndo_validate_addr = eth_validate_addr,
5067 .ndo_change_mtu = rtl8169_change_mtu,
5068 .ndo_fix_features = rtl8169_fix_features,
5069 .ndo_set_features = rtl8169_set_features,
5070 .ndo_set_mac_address = rtl_set_mac_address,
5071 .ndo_do_ioctl = phy_do_ioctl_running,
5072 .ndo_set_rx_mode = rtl_set_rx_mode,
5073#ifdef CONFIG_NET_POLL_CONTROLLER
5074 .ndo_poll_controller = rtl8169_netpoll,
5075#endif
5076
5077};
5078
5079static void rtl_set_irq_mask(struct rtl8169_private *tp)
5080{
5081 tp->irq_mask = RxOK | RxErr | TxOK | TxErr | LinkChg;
5082
5083 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
5084 tp->irq_mask |= SYSErr | RxOverflow | RxFIFOOver;
5085 else if (tp->mac_version == RTL_GIGA_MAC_VER_11)
5086
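		/* RTL8168b needs RxFIFOOver in the mask; the interrupt handler
		 * treats it as a reason to reset the chip on this version.
		 */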
5087 tp->irq_mask |= RxFIFOOver;
5088 else
5089 tp->irq_mask |= RxOverflow;
5090}
5091
5092static int rtl_alloc_irq(struct rtl8169_private *tp)
5093{
5094 unsigned int flags;
5095
5096 switch (tp->mac_version) {
5097 case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
5098 rtl_unlock_config_regs(tp);
5099 RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
5100 rtl_lock_config_regs(tp);
5101		fallthrough;
5102 case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17:
5103 flags = PCI_IRQ_LEGACY;
5104 break;
5105 default:
5106 flags = PCI_IRQ_ALL_TYPES;
5107 break;
5108 }
5109
5110 return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags);
5111}
5112
5113static void rtl_read_mac_address(struct rtl8169_private *tp,
5114 u8 mac_addr[ETH_ALEN])
5115{
5116
5117 if (rtl_is_8168evl_up(tp) && tp->mac_version != RTL_GIGA_MAC_VER_34) {
5118 u32 value = rtl_eri_read(tp, 0xe0);
5119
5120 mac_addr[0] = (value >> 0) & 0xff;
5121 mac_addr[1] = (value >> 8) & 0xff;
5122 mac_addr[2] = (value >> 16) & 0xff;
5123 mac_addr[3] = (value >> 24) & 0xff;
5124
5125 value = rtl_eri_read(tp, 0xe4);
5126 mac_addr[4] = (value >> 0) & 0xff;
5127 mac_addr[5] = (value >> 8) & 0xff;
5128 } else if (rtl_is_8125(tp)) {
5129 rtl_read_mac_from_reg(tp, mac_addr, MAC0_BKP);
5130 }
5131}
5132
5133DECLARE_RTL_COND(rtl_link_list_ready_cond)
5134{
5135 return RTL_R8(tp, MCU) & LINK_LIST_RDY;
5136}
5137
5138DECLARE_RTL_COND(rtl_rxtx_empty_cond)
5139{
5140 return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
5141}
5142
5143static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg)
5144{
5145 struct rtl8169_private *tp = mii_bus->priv;
5146
5147 if (phyaddr > 0)
5148 return -ENODEV;
5149
5150 return rtl_readphy(tp, phyreg);
5151}
5152
5153static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr,
5154 int phyreg, u16 val)
5155{
5156 struct rtl8169_private *tp = mii_bus->priv;
5157
5158 if (phyaddr > 0)
5159 return -ENODEV;
5160
5161 rtl_writephy(tp, phyreg, val);
5162
5163 return 0;
5164}
5165
5166static int r8169_mdio_register(struct rtl8169_private *tp)
5167{
5168 struct pci_dev *pdev = tp->pci_dev;
5169 struct mii_bus *new_bus;
5170 int ret;
5171
5172 new_bus = devm_mdiobus_alloc(&pdev->dev);
5173 if (!new_bus)
5174 return -ENOMEM;
5175
5176 new_bus->name = "r8169";
5177 new_bus->priv = tp;
5178 new_bus->parent = &pdev->dev;
5179 new_bus->irq[0] = PHY_IGNORE_INTERRUPT;
5180 snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev));
5181
5182 new_bus->read = r8169_mdio_read_reg;
5183 new_bus->write = r8169_mdio_write_reg;
5184
5185 ret = mdiobus_register(new_bus);
5186 if (ret)
5187 return ret;
5188
5189 tp->phydev = mdiobus_get_phy(new_bus, 0);
5190 if (!tp->phydev) {
5191 mdiobus_unregister(new_bus);
5192 return -ENODEV;
5193 } else if (!tp->phydev->drv) {
5194
5195
5196
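		/* Most chip versions fail with the genphy driver.
		 * Therefore ensure that the dedicated PHY driver is loaded.
		 */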
5197 dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n");
5198 mdiobus_unregister(new_bus);
5199 return -EUNATCH;
5200 }
5201
5202
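	/* PHY will be woken up again in rtl_open() */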
5203 phy_suspend(tp->phydev);
5204
5205 return 0;
5206}
5207
5208static void rtl_hw_init_8168g(struct rtl8169_private *tp)
5209{
5210 tp->ocp_base = OCP_STD_PHY_BASE;
5211
5212 RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
5213
5214 if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
5215 return;
5216
5217 if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
5218 return;
5219
5220 RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
5221 msleep(1);
5222 RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
5223
5224 r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);
5225
5226 if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
5227 return;
5228
5229 r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15));
5230
5231 rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
5232}
5233
5234static void rtl_hw_init_8125(struct rtl8169_private *tp)
5235{
5236 tp->ocp_base = OCP_STD_PHY_BASE;
5237
5238 RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
5239
5240 if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
5241 return;
5242
5243 RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
5244 msleep(1);
5245 RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
5246
5247 r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);
5248
5249 if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
5250 return;
5251
5252 r8168_mac_ocp_write(tp, 0xc0aa, 0x07d0);
5253 r8168_mac_ocp_write(tp, 0xc0a6, 0x0150);
5254 r8168_mac_ocp_write(tp, 0xc01e, 0x5555);
5255
5256 rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
5257}
5258
5259static void rtl_hw_initialize(struct rtl8169_private *tp)
5260{
5261 switch (tp->mac_version) {
5262 case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
5263 rtl8168ep_stop_cmac(tp);
5264		fallthrough;
5265 case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
5266 rtl_hw_init_8168g(tp);
5267 break;
5268 case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
5269 rtl_hw_init_8125(tp);
5270 break;
5271 default:
5272 break;
5273 }
5274}
5275
5276static int rtl_jumbo_max(struct rtl8169_private *tp)
5277{
5278
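	/* Non-GBit versions don't support jumbo frames */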
5279 if (!tp->supports_gmii)
5280 return 0;
5281
5282 switch (tp->mac_version) {
5283
5284 case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
5285 return JUMBO_7K;
5286
5287 case RTL_GIGA_MAC_VER_11:
5288 case RTL_GIGA_MAC_VER_12:
5289 case RTL_GIGA_MAC_VER_17:
5290 return JUMBO_4K;
5291
5292 case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
5293 return JUMBO_6K;
5294 default:
5295 return JUMBO_9K;
5296 }
5297}
5298
5299static void rtl_disable_clk(void *data)
5300{
5301 clk_disable_unprepare(data);
5302}
5303
5304static int rtl_get_ether_clk(struct rtl8169_private *tp)
5305{
5306 struct device *d = tp_to_dev(tp);
5307 struct clk *clk;
5308 int rc;
5309
5310 clk = devm_clk_get(d, "ether_clk");
5311 if (IS_ERR(clk)) {
5312 rc = PTR_ERR(clk);
5313 if (rc == -ENOENT)
5314
5315 rc = 0;
5316 else if (rc != -EPROBE_DEFER)
5317 dev_err(d, "failed to get clk: %d\n", rc);
5318 } else {
5319 tp->clk = clk;
5320 rc = clk_prepare_enable(clk);
5321 if (rc)
5322 dev_err(d, "failed to enable clk: %d\n", rc);
5323 else
5324 rc = devm_add_action_or_reset(d, rtl_disable_clk, clk);
5325 }
5326
5327 return rc;
5328}
5329
5330static void rtl_init_mac_address(struct rtl8169_private *tp)
5331{
5332 struct net_device *dev = tp->dev;
5333 u8 *mac_addr = dev->dev_addr;
5334 int rc;
5335
5336 rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr);
5337 if (!rc)
5338 goto done;
5339
5340 rtl_read_mac_address(tp, mac_addr);
5341 if (is_valid_ether_addr(mac_addr))
5342 goto done;
5343
5344 rtl_read_mac_from_reg(tp, mac_addr, MAC0);
5345 if (is_valid_ether_addr(mac_addr))
5346 goto done;
5347
5348 eth_hw_addr_random(dev);
5349 dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n");
5350done:
5351 rtl_rar_set(tp, mac_addr);
5352}
5353
5354static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5355{
5356 struct rtl8169_private *tp;
5357 int jumbo_max, region, rc;
5358 enum mac_version chipset;
5359 struct net_device *dev;
5360 u16 xid;
5361
5362	dev = devm_alloc_etherdev(&pdev->dev, sizeof(*tp));
5363 if (!dev)
5364 return -ENOMEM;
5365
5366 SET_NETDEV_DEV(dev, &pdev->dev);
5367 dev->netdev_ops = &rtl_netdev_ops;
5368 tp = netdev_priv(dev);
5369 tp->dev = dev;
5370 tp->pci_dev = pdev;
5371 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
5372 tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;
5373 tp->eee_adv = -1;
5374
5375
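	/* Get the (optional) external "ether_clk" used on some boards */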
5376 rc = rtl_get_ether_clk(tp);
5377 if (rc)
5378 return rc;
5379
5380
5381
5382
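	/* Disable ASPM (L0s/L1) completely; it has been reported to cause
	 * random device stalls and full system hangs on some systems.
	 */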
5383 rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
5384 PCIE_LINK_STATE_L1);
5385 tp->aspm_manageable = !rc;
5386
5387
5388 rc = pcim_enable_device(pdev);
5389 if (rc < 0) {
5390 dev_err(&pdev->dev, "enable failure\n");
5391 return rc;
5392 }
5393
5394 if (pcim_set_mwi(pdev) < 0)
5395 dev_info(&pdev->dev, "Mem-Wr-Inval unavailable\n");
5396
5397
5398 region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1;
5399 if (region < 0) {
5400 dev_err(&pdev->dev, "no MMIO resource found\n");
5401 return -ENODEV;
5402 }
5403
5404
5405 if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
5406 dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n");
5407 return -ENODEV;
5408 }
5409
5410 rc = pcim_iomap_regions(pdev, BIT(region), MODULENAME);
5411 if (rc < 0) {
5412 dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
5413 return rc;
5414 }
5415
5416 tp->mmio_addr = pcim_iomap_table(pdev)[region];
5417
5418 xid = (RTL_R32(tp, TxConfig) >> 20) & 0xfcf;
5419
5420
5421 chipset = rtl8169_get_mac_version(xid, tp->supports_gmii);
5422 if (chipset == RTL_GIGA_MAC_NONE) {
5423 dev_err(&pdev->dev, "unknown chip XID %03x\n", xid);
5424 return -ENODEV;
5425 }
5426
5427 tp->mac_version = chipset;
5428
5429 tp->cp_cmd = RTL_R16(tp, CPlusCmd);
5430
5431 if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 &&
5432 !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
5433 dev->features |= NETIF_F_HIGHDMA;
5434
5435 rtl_init_rxcfg(tp);
5436
5437 rtl8169_irq_mask_and_ack(tp);
5438
5439 rtl_hw_initialize(tp);
5440
5441 rtl_hw_reset(tp);
5442
5443 pci_set_master(pdev);
5444
5445 rc = rtl_alloc_irq(tp);
5446 if (rc < 0) {
5447 dev_err(&pdev->dev, "Can't allocate interrupt\n");
5448 return rc;
5449 }
5450
5451 mutex_init(&tp->wk.mutex);
5452 INIT_WORK(&tp->wk.work, rtl_task);
5453 u64_stats_init(&tp->rx_stats.syncp);
5454 u64_stats_init(&tp->tx_stats.syncp);
5455
5456 rtl_init_mac_address(tp);
5457
5458 dev->ethtool_ops = &rtl8169_ethtool_ops;
5459
5460 netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);
5461
5462 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
5463 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
5464 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
5465 NETIF_F_HIGHDMA;
5466 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
5467
5468 tp->cp_cmd |= RxChkSum;
5469
5470 if (!rtl_is_8125(tp))
5471 tp->cp_cmd |= RxVlan;
5472
5473
5474
5475
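	/* RTL8169sc/8110sc (VER_05): keep hardware VLAN RX stripping always
	 * enabled by not exposing it as a toggleable feature; this reportedly
	 * avoids an interrupt stall seen on these chips under load.
	 */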
5476 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
5477
5478 dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
5479
5480 if (rtl_chip_supports_csum_v2(tp))
5481 dev->hw_features |= NETIF_F_IPV6_CSUM;
5482
5483 dev->features |= dev->hw_features;
5484
5485
5486
5487
5488
5489
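	/* SG/TSO have been reported to cause Tx timeouts on some boards while
	 * working fine on others, so they are advertised in hw_features but
	 * left out of the default features; users may enable them via ethtool
	 * at their own risk.
	 */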
5490 if (rtl_chip_supports_csum_v2(tp)) {
5491 dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
5492 dev->gso_max_size = RTL_GSO_MAX_SIZE_V2;
5493 dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2;
5494 } else {
5495 dev->hw_features |= NETIF_F_SG | NETIF_F_TSO;
5496 dev->gso_max_size = RTL_GSO_MAX_SIZE_V1;
5497 dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
5498 }
5499
5500 dev->hw_features |= NETIF_F_RXALL;
5501 dev->hw_features |= NETIF_F_RXFCS;
5502
5503 jumbo_max = rtl_jumbo_max(tp);
5504 if (jumbo_max)
5505 dev->max_mtu = jumbo_max;
5506
5507 rtl_set_irq_mask(tp);
5508
5509 tp->fw_name = rtl_chip_infos[chipset].fw_name;
5510
5511	tp->counters = dmam_alloc_coherent(&pdev->dev, sizeof(*tp->counters),
5512 &tp->counters_phys_addr,
5513 GFP_KERNEL);
5514 if (!tp->counters)
5515 return -ENOMEM;
5516
5517 pci_set_drvdata(pdev, dev);
5518
5519 rc = r8169_mdio_register(tp);
5520 if (rc)
5521 return rc;
5522
5523
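	/* Power the chip down until the interface is brought up;
	 * rtl_open() powers it back up.
	 */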
5524 rtl_pll_power_down(tp);
5525
5526 rc = register_netdev(dev);
5527 if (rc)
5528 goto err_mdio_unregister;
5529
5530 netif_info(tp, probe, dev, "%s, %pM, XID %03x, IRQ %d\n",
5531 rtl_chip_infos[chipset].name, dev->dev_addr, xid,
5532 pci_irq_vector(pdev, 0));
5533
5534 if (jumbo_max)
5535 netif_info(tp, probe, dev,
5536 "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
5537 jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
5538 "ok" : "ko");
5539
5540 if (r8168_check_dash(tp))
5541 rtl8168_driver_start(tp);
5542
5543 if (pci_dev_run_wake(pdev))
5544 pm_runtime_put_sync(&pdev->dev);
5545
5546 return 0;
5547
5548err_mdio_unregister:
5549 mdiobus_unregister(tp->phydev->mdio.bus);
5550 return rc;
5551}
5552
5553static struct pci_driver rtl8169_pci_driver = {
5554 .name = MODULENAME,
5555 .id_table = rtl8169_pci_tbl,
5556 .probe = rtl_init_one,
5557 .remove = rtl_remove_one,
5558 .shutdown = rtl_shutdown,
5559 .driver.pm = RTL8169_PM_OPS,
5560};
5561
5562module_pci_driver(rtl8169_pci_driver);
5563