#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_xgmac.h"

static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_ring_if_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	iowrite32(val, addr);
}

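/* Indirect register write: the target register offset and data are staged in
 * the block's ADDR and WRITE registers, a write command is issued through the
 * COMMAND register, and COMMAND_DONE is polled (up to ~10 us) for completion.
 */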
static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 wr_addr, u32 wr_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(wr_addr, addr);
	iowrite32(wr_data, wr);
	iowrite32(XGENE_ENET_WR_CMD, cmd);

	/* wait for write command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata,
			      u32 wr_addr, u32 wr_data)
{
	void __iomem *addr, *wr, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
		netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
			   wr_addr);
}

static void xgene_enet_wr_pcs(struct xgene_enet_pdata *pdata,
			      u32 wr_addr, u32 wr_data)
{
	void __iomem *addr, *wr, *cmd, *cmd_done;

	addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET;
	wr = pdata->pcs_addr + PCS_WRITE_REG_OFFSET;
	cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET;
	cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
		netdev_err(pdata->ndev, "PCS write failed, addr: %04x\n",
			   wr_addr);
}

static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	*val = ioread32(addr);
}

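/* Indirect register read: mirrors the indirect write path, but issues a read
 * command and fetches the result from the block's READ register once
 * COMMAND_DONE is asserted.
 */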
static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 rd_addr, u32 *rd_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	/* wait for read command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	*rd_data = ioread32(rd);
	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata,
			      u32 rd_addr, u32 *rd_data)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
		netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
			   rd_addr);
}

static bool xgene_enet_rd_pcs(struct xgene_enet_pdata *pdata,
			      u32 rd_addr, u32 *rd_data)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;
	bool success;

	addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET;
	rd = pdata->pcs_addr + PCS_READ_REG_OFFSET;
	cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET;
	cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET;

	success = xgene_enet_rd_indirect(addr, rd, cmd, cmd_done,
					 rd_addr, rd_data);
	if (!success)
		netdev_err(pdata->ndev, "PCS read failed, addr: %04x\n",
			   rd_addr);

	return success;
}

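/* Bring the Ethernet block RAMs out of shutdown and poll the memory-ready
 * status until all banks report ready (up to roughly 1 ms), returning
 * -ENODEV on timeout.
 */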
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	u32 data;
	u8 wait = 10;

	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
	do {
		usleep_range(100, 110);
		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
	} while ((data != 0xffffffff) && wait--);

	if (data != 0xffffffff) {
		netdev_err(ndev, "Failed to release memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}

static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, 0);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, 0);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, 0);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, 0);
}

static void xgene_xgmac_reset(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, HSTMACRST);
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, 0);
}

static void xgene_pcs_reset(struct xgene_enet_pdata *pdata)
{
	u32 data;

	if (!xgene_enet_rd_pcs(pdata, PCS_CONTROL_1, &data))
		return;

	xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data | PCS_CTRL_PCS_RST);
	xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data & ~PCS_CTRL_PCS_RST);
}

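/* Program the station MAC address: bytes 0-3 are packed into the LSW
 * register and bytes 4-5 into the upper halfword of the MSW register.
 */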
static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
	u32 addr0, addr1;
	u8 *dev_addr = pdata->ndev->dev_addr;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);

	xgene_enet_wr_mac(pdata, HSTMACADR_LSW_ADDR, addr0);
	xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1);
}

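/* Each 32-bit TSIF MSS register holds two 16-bit MSS values, so the four MSS
 * slots span two registers: index / 2 selects the register, index & 1 selects
 * the halfword, and a read-modify-write preserves the neighbouring slot.
 */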
static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata,
				u16 mss, u8 index)
{
	u8 offset;
	u32 data;

	offset = (index < 2) ? 0 : 4;
	xgene_enet_rd_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, &data);

	if (!(index & 0x1))
		data = SET_VAL(TSO_MSS1, data >> TSO_MSS1_POS) |
		       SET_VAL(TSO_MSS0, mss);
	else
		data = SET_VAL(TSO_MSS1, mss) | SET_VAL(TSO_MSS0, data);

	xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, data);
}

static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_csr(pdata, XG_LINK_STATUS_ADDR, &data);

	return data;
}

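/* One-time XGMAC bring-up: reset the MAC, adjust AXGMAC_CONFIG_1 (set HSTPPEN,
 * clear the frame length check), program the MAC address, and configure the
 * port CSRs (RSIF free-pool buffer timeout, spare config bits, RX DV gate and
 * TX bypass resume).
 */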
static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_xgmac_reset(pdata);

	xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
	data |= HSTPPEN;
	data &= ~HSTLENCHK;
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);

	xgene_xgmac_set_mac_addr(pdata);

	xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
	data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data);

	xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data);
	data |= BIT(12);
	xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data);
	xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x82);
	xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0);
	xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX);
}

static void xgene_xgmac_rx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTRFEN);
}

static void xgene_xgmac_tx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTTFEN);
}

static void xgene_xgmac_rx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTRFEN);
}

static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
}

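/* Port reset: make sure the ring manager is up, then cycle the port either by
 * toggling its clock (DT boot) or by invoking the ACPI _RST (or _INI) method,
 * and finally release the block RAMs and clear the QMI ring associations.
 */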
static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (!xgene_ring_mgr_init(pdata))
		return -ENODEV;

	if (dev->of_node) {
		clk_prepare_enable(pdata->clk);
		udelay(5);
		clk_disable_unprepare(pdata->clk);
		udelay(5);
		clk_prepare_enable(pdata->clk);
		udelay(5);
	} else {
#ifdef CONFIG_ACPI
		if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
					     "_RST", NULL, NULL);
		} else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
					   "_INI")) {
			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
					     "_INI", NULL, NULL);
		}
#endif
	}

	xgene_enet_ecc_init(pdata);
	xgene_enet_config_ring_if_assoc(pdata);

	return 0;
}

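/* Bypass the classifier engine (CLE): steer received frames directly to the
 * given destination ring and free-pool buffer selection instead of running
 * them through classification.
 */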
static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
				    u32 dst_ring_num, u16 bufpool_id)
{
	u32 cb, fpsel;

	xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG0_ADDR, &cb);
	cb |= CFG_CLE_BYPASS_EN0;
	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
	xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG0_ADDR, cb);

	fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
	xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG1_ADDR, &cb);
	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
	CFG_CLE_FPSEL0_SET(&cb, fpsel);
	xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG1_ADDR, cb);
}

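/* Port shutdown: build bitmasks of the RX buffer pools and TX work queues in
 * use, reset them through the QMI interface, and gate the port clock when
 * booted from device tree.
 */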
static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;
	u32 pb, val;
	int i;

	pb = 0;
	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i]->buf_pool;

		val = xgene_enet_ring_bufnum(ring->id);
		pb |= BIT(val - 0x20);
	}
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);

	pb = 0;
	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];

		val = xgene_enet_ring_bufnum(ring->id);
		pb |= BIT(val);
	}
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);

	if (dev->of_node) {
		if (!IS_ERR(pdata->clk))
			clk_disable_unprepare(pdata->clk);
	}
}

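/* Reset a single ring through the QMI interface, selecting the free-pool or
 * work-queue reset register based on the ring type.
 */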
static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
			     struct xgene_enet_desc_ring *ring)
{
	u32 addr, val, data;

	val = xgene_enet_ring_bufnum(ring->id);

	if (xgene_enet_is_bufpool(ring->id)) {
		addr = ENET_CFGSSQMIFPRESET_ADDR;
		data = BIT(val - 0x20);
	} else {
		addr = ENET_CFGSSQMIWQRESET_ADDR;
		data = BIT(val);
	}

	xgene_enet_wr_ring_if(pdata, addr, data);
}

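/* Delayed-work link poller: reads the XFI link status (gated by the SFP ready
 * GPIO when present), toggles the carrier and MAC RX/TX enables accordingly,
 * resets the PCS while the link is down, and re-arms itself at the
 * PHY_POLL_LINK_ON/OFF interval.
 */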
static void xgene_enet_link_state(struct work_struct *work)
{
	struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work),
					 struct xgene_enet_pdata, link_work);
	struct gpio_desc *sfp_rdy = pdata->sfp_rdy;
	struct net_device *ndev = pdata->ndev;
	u32 link_status, poll_interval;

	link_status = xgene_enet_link_status(pdata);
	if (link_status && !IS_ERR(sfp_rdy) && !gpiod_get_value(sfp_rdy))
		link_status = 0;

	if (link_status) {
		if (!netif_carrier_ok(ndev)) {
			netif_carrier_on(ndev);
			xgene_xgmac_rx_enable(pdata);
			xgene_xgmac_tx_enable(pdata);
			netdev_info(ndev, "Link is Up - 10Gbps\n");
		}
		poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(ndev)) {
			xgene_xgmac_rx_disable(pdata);
			xgene_xgmac_tx_disable(pdata);
			netif_carrier_off(ndev);
			netdev_info(ndev, "Link is Down\n");
		}
		poll_interval = PHY_POLL_LINK_OFF;

		xgene_pcs_reset(pdata);
	}

	schedule_delayed_work(&pdata->link_work, poll_interval);
}

const struct xgene_mac_ops xgene_xgmac_ops = {
	.init = xgene_xgmac_init,
	.reset = xgene_xgmac_reset,
	.rx_enable = xgene_xgmac_rx_enable,
	.tx_enable = xgene_xgmac_tx_enable,
	.rx_disable = xgene_xgmac_rx_disable,
	.tx_disable = xgene_xgmac_tx_disable,
	.set_mac_addr = xgene_xgmac_set_mac_addr,
	.set_mss = xgene_xgmac_set_mss,
	.link_state = xgene_enet_link_state
};

const struct xgene_port_ops xgene_xgport_ops = {
	.reset = xgene_enet_reset,
	.clear = xgene_enet_clear,
	.cle_bypass = xgene_enet_xgcle_bypass,
	.shutdown = xgene_enet_shutdown,
};