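/*
 * AMD 10Gb Ethernet driver
 *
 * Module-level setup for the amd-xgbe driver: default configuration,
 * private data allocation, netdev configuration and module init/exit.
 */
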
#include <linux/module.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/io.h>

#include "xgbe.h"
#include "xgbe-common.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(XGBE_DRV_VERSION);
MODULE_DESCRIPTION(XGBE_DRV_DESC);

static int debug = -1;
module_param(debug, int, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, " Network interface message level setting");

static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
				      NETIF_MSG_IFUP);

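/* Establish a conservative default configuration: 8x PBL with 16-beat DMA
 * bursts, store-and-forward Tx, 64-byte threshold Rx, and autonegotiated
 * Tx/Rx pause frames.
 */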
static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
	DBGPR("-->xgbe_default_config\n");

	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_16;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_DISABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_16;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 1;
	pdata->rx_pause = 1;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;

	DBGPR("<--xgbe_default_config\n");
}

static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
{
	xgbe_init_function_ptrs_dev(&pdata->hw_if);
	xgbe_init_function_ptrs_phy(&pdata->phy_if);
	xgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	xgbe_init_function_ptrs_desc(&pdata->desc_if);

	/* Let the version-specific data override the common PHY ops */
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

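/* Allocate the net_device with its embedded private data area and
 * initialize the locks, mutexes and completions used by the driver.
 */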
struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
{
	struct xgbe_prv_data *pdata;
	struct net_device *netdev;

	netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
				   XGBE_MAX_DMA_CHANNELS);
	if (!netdev) {
		dev_err(dev, "alloc_etherdev_mq failed\n");
		return ERR_PTR(-ENOMEM);
	}
	SET_NETDEV_DEV(netdev, dev);
	pdata = netdev_priv(netdev);
	pdata->netdev = netdev;
	pdata->dev = dev;

	spin_lock_init(&pdata->lock);
	spin_lock_init(&pdata->xpcs_lock);
	mutex_init(&pdata->rss_mutex);
	spin_lock_init(&pdata->tstamp_lock);
	mutex_init(&pdata->i2c_mutex);
	init_completion(&pdata->i2c_complete);
	init_completion(&pdata->mdio_complete);

	pdata->msg_enable = netif_msg_init(debug, default_msg_level);

	set_bit(XGBE_DOWN, &pdata->dev_state);
	set_bit(XGBE_STOPPED, &pdata->dev_state);

	return pdata;
}

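/* Free the net_device; the private data area is embedded within it */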
void xgbe_free_pdata(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;

	free_netdev(netdev);
}

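/* Clamp the hardware-reported DMA channel and MTL queue counts to any
 * platform-imposed maximums and to the number of online CPUs.
 */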
void xgbe_set_counts(struct xgbe_prv_data *pdata)
{
	/* Set all the function pointers */
	xgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	xgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created:
	 *  - Tx (DMA) channels map 1-to-1 to Tx queues, so set the
	 *    number of Tx queues to the number of Tx channels enabled
	 *  - Rx (DMA) channels do not map 1-to-1, so use the actual
	 *    number of Rx queues or the maximum allowed
	 */
	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
				     pdata->hw_feat.tx_ch_cnt);
	pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
				     pdata->tx_max_channel_count);
	pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
				     pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = min_t(unsigned int, num_online_cpus(),
				     pdata->hw_feat.rx_ch_cnt);
	pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
				     pdata->rx_max_channel_count);

	pdata->rx_q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt,
				  pdata->rx_max_q_count);

	if (netif_msg_probe(pdata)) {
		dev_dbg(pdata->dev, "TX/RX DMA channel count = %u/%u\n",
			pdata->tx_ring_count, pdata->rx_ring_count);
		dev_dbg(pdata->dev, "TX/RX hardware queue count = %u/%u\n",
			pdata->tx_q_count, pdata->rx_q_count);
	}
}

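/* Complete netdev configuration: reset the hardware, apply defaults,
 * size the descriptor rings, program RSS, attach the device operations
 * and feature flags, and register the net_device and support workqueues.
 */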
int xgbe_config_netdev(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct device *dev = pdata->dev;
	unsigned int i;
	int ret;

	netdev->irq = pdata->dev_irq;
	netdev->base_addr = (unsigned long)pdata->xgmac_regs;
	memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);

	/* Initialize the ECC error-reporting period timestamps */
	pdata->tx_sec_period = jiffies;
	pdata->tx_ded_period = jiffies;
	pdata->rx_sec_period = jiffies;
	pdata->rx_ded_period = jiffies;
	pdata->desc_sec_period = jiffies;
	pdata->desc_ded_period = jiffies;

	/* Issue a software reset to the device */
	pdata->hw_if.exit(pdata);

	/* Set default configuration data */
	xgbe_default_config(pdata);

	/* Set the DMA mask */
	ret = dma_set_mask_and_coherent(dev,
					DMA_BIT_MASK(pdata->hw_feat.dma_width));
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed\n");
		return ret;
	}

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	/* Set and validate the number of descriptors for a ring */
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
	pdata->tx_desc_count = XGBE_TX_DESC_CNT;

	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
	pdata->rx_desc_count = XGBE_RX_DESC_CNT;

	/* Adjust the number of queues based on interrupts assigned */
	if (pdata->channel_irq_count) {
		pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
					     pdata->channel_irq_count);
		pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
					     pdata->channel_irq_count);

		if (netif_msg_probe(pdata))
			dev_dbg(pdata->dev,
				"adjusted TX/RX DMA channel count = %u/%u\n",
				pdata->tx_ring_count, pdata->rx_ring_count);
	}

	/* Set the number of queues exposed to the stack */
	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real tx queue count\n");
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real rx queue count\n");
		return ret;
	}

	/* Initialize the RSS hash key and fill the indirection table,
	 * spreading flows round-robin across the enabled Rx rings
	 */
	netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));

	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
			       i % pdata->rx_ring_count);

	/* Enable RSS hashing on IP, TCP and UDP headers */
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

	/* Call the MDIO/PHY initialization routine */
	ret = pdata->phy_if.phy_init(pdata);
	if (ret)
		return ret;

	/* Set device operations */
	netdev->netdev_ops = xgbe_get_netdev_ops();
	netdev->ethtool_ops = xgbe_get_ethtool_ops();
#ifdef CONFIG_AMD_XGBE_DCB
	netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
#endif

	/* Set device features */
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_RXCSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_GRO |
			      NETIF_F_HW_VLAN_CTAG_RX |
			      NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_FILTER;

	if (pdata->hw_feat.rss)
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->vlan_features |= NETIF_F_SG |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_TSO |
				 NETIF_F_TSO6;

	netdev->features |= netdev->hw_features;
	pdata->netdev_features = netdev->features;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Use the default watchdog timeout */
	netdev->watchdog_timeo = 0;

	xgbe_init_rx_coalesce(pdata);
	xgbe_init_tx_coalesce(pdata);

	netif_carrier_off(netdev);
	ret = register_netdev(netdev);
	if (ret) {
		dev_err(dev, "net device registration failed\n");
		return ret;
	}

	/* Create the PCS/auto-negotiation name based on netdev name */
	snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
		 netdev_name(netdev));

	/* Create the ECC name based on netdev name */
	snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc",
		 netdev_name(netdev));

	/* Create the I2C name based on netdev name */
	snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c",
		 netdev_name(netdev));

	/* Create workqueues */
	pdata->dev_workqueue =
		create_singlethread_workqueue(netdev_name(netdev));
	if (!pdata->dev_workqueue) {
		netdev_err(netdev, "device workqueue creation failed\n");
		ret = -ENOMEM;
		goto err_netdev;
	}

	pdata->an_workqueue =
		create_singlethread_workqueue(pdata->an_name);
	if (!pdata->an_workqueue) {
		netdev_err(netdev, "phy workqueue creation failed\n");
		ret = -ENOMEM;
		goto err_wq;
	}

	xgbe_ptp_register(pdata);

	xgbe_debugfs_init(pdata);

	netif_dbg(pdata, drv, pdata->netdev, "%u Tx software queues\n",
		  pdata->tx_ring_count);
	netif_dbg(pdata, drv, pdata->netdev, "%u Rx software queues\n",
		  pdata->rx_ring_count);

	return 0;

err_wq:
	destroy_workqueue(pdata->dev_workqueue);

err_netdev:
	unregister_netdev(netdev);

	return ret;
}

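/* Reverse the configuration performed by xgbe_config_netdev() */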
void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;

	xgbe_debugfs_exit(pdata);

	xgbe_ptp_unregister(pdata);

	pdata->phy_if.phy_exit(pdata);

	flush_workqueue(pdata->an_workqueue);
	destroy_workqueue(pdata->an_workqueue);

	flush_workqueue(pdata->dev_workqueue);
	destroy_workqueue(pdata->dev_workqueue);

	unregister_netdev(netdev);
}

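/* Module init registers the platform driver and then the PCI driver;
 * xgbe_mod_exit() tears them down in the reverse order.
 */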
static int __init xgbe_mod_init(void)
{
	int ret;

	ret = xgbe_platform_init();
	if (ret)
		return ret;

	ret = xgbe_pci_init();
	if (ret) {
		/* Unwind the platform registration on PCI init failure */
		xgbe_platform_exit();
		return ret;
	}

	return 0;
}

static void __exit xgbe_mod_exit(void)
{
	xgbe_pci_exit();

	xgbe_platform_exit();
}

module_init(xgbe_mod_init);
module_exit(xgbe_mod_exit);